Attachment 'MC2_litmusrt_ipdps15.patch'

Download

   1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
   2 index 8dd7248..48128ed 100644
   3 --- a/Documentation/kernel-parameters.txt
   4 +++ b/Documentation/kernel-parameters.txt
   5 @@ -879,6 +879,7 @@ and is between 256 and 4096 characters. It is defined in the file
   6  			     controller
   7  	i8042.nopnp	[HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
   8  			     controllers
   9 +	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
  10  	i8042.reset	[HW] Reset the controller during init and cleanup
  11  	i8042.unlock	[HW] Unlock (ignore) the keylock
  12  
  13 @@ -1745,7 +1746,7 @@ and is between 256 and 4096 characters. It is defined in the file
  14  
  15  	nousb		[USB] Disable the USB subsystem
  16  
  17 -	nowatchdog	[KNL] Disable the lockup detector.
  18 +	nowatchdog	[KNL] Disable the lockup detector (NMI watchdog).
  19  
  20  	nowb		[ARM]
  21  
  22 diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
  23 index 55b859b..506d9d9 100644
  24 --- a/Documentation/power/runtime_pm.txt
  25 +++ b/Documentation/power/runtime_pm.txt
  26 @@ -336,8 +336,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
  27        zero)
  28  
  29    bool pm_runtime_suspended(struct device *dev);
  30 -    - return true if the device's runtime PM status is 'suspended', or false
  31 -      otherwise
  32 +    - return true if the device's runtime PM status is 'suspended' and its
  33 +      'power.disable_depth' field is equal to zero, or false otherwise
  34  
  35    void pm_runtime_allow(struct device *dev);
  36      - set the power.runtime_auto flag for the device and decrease its usage
  37 diff --git a/Makefile b/Makefile
  38 index 8e53f47..77a7eb5 100644
  39 --- a/Makefile
  40 +++ b/Makefile
  41 @@ -1,7 +1,7 @@
  42  VERSION = 2
  43  PATCHLEVEL = 6
  44  SUBLEVEL = 36
  45 -EXTRAVERSION =-litmus2010
  46 +EXTRAVERSION =.4-litmus2010
  47  NAME = Flesh-Eating Bats with Fangs
  48  
  49  # *DOCUMENTATION*
  50 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
  51 index 6e8f05c..d757555 100644
  52 --- a/arch/arm/include/asm/assembler.h
  53 +++ b/arch/arm/include/asm/assembler.h
  54 @@ -215,7 +215,7 @@
  55  	@ Slightly optimised to avoid incrementing the pointer twice
  56  	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
  57  	.if	\rept == 2
  58 -	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
  59 +	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
  60  	.endif
  61  
  62  	add\cond \ptr, #\rept * \inc
  63 diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
  64 index 0826599..48066ce 100644
  65 --- a/arch/arm/include/asm/kgdb.h
  66 +++ b/arch/arm/include/asm/kgdb.h
  67 @@ -70,7 +70,8 @@ extern int kgdb_fault_expected;
  68  #define _GP_REGS		16
  69  #define _FP_REGS		8
  70  #define _EXTRA_REGS		2
  71 -#define DBG_MAX_REG_NUM		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
  72 +#define GDB_MAX_REGS		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
  73 +#define DBG_MAX_REG_NUM		(_GP_REGS + _FP_REGS + _EXTRA_REGS)
  74  
  75  #define KGDB_MAX_NO_CPUS	1
  76  #define BUFMAX			400
  77 @@ -93,7 +94,7 @@ extern int kgdb_fault_expected;
  78  #define _SPT			13
  79  #define _LR			14
  80  #define _PC			15
  81 -#define _CPSR			(DBG_MAX_REG_NUM - 1)
  82 +#define _CPSR			(GDB_MAX_REGS - 1)
  83  
  84  /*
  85   * So that we can denote the end of a frame for tracing,
  86 diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
  87 index 584a683..b99087a 100644
  88 --- a/arch/arm/kernel/calls.S
  89 +++ b/arch/arm/kernel/calls.S
  90 @@ -384,14 +384,12 @@
  91  		CALL(sys_complete_job)
  92  		CALL(sys_od_open)
  93  		CALL(sys_od_close)
  94 -/* 375 */	CALL(sys_fmlp_down)
  95 -		CALL(sys_fmlp_up)
  96 -		CALL(sys_srp_down)
  97 -		CALL(sys_srp_up)
  98 +/* 375 */	CALL(sys_litmus_lock)
  99 +		CALL(sys_litmus_unlock)
 100  		CALL(sys_query_job_no)
 101 -/* 380 */	CALL(sys_wait_for_job_release)
 102 +		CALL(sys_wait_for_job_release)
 103  		CALL(sys_wait_for_ts_release)
 104 -		CALL(sys_release_ts)
 105 +/* 380 */	CALL(sys_release_ts)
 106  		CALL(sys_null_call)
 107  #ifndef syscalls_counted
 108  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 109 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
 110 index d6e8b4d..778c2f7 100644
 111 --- a/arch/arm/kernel/kgdb.c
 112 +++ b/arch/arm/kernel/kgdb.c
 113 @@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 114  		return;
 115  
 116  	/* Initialize to zero */
 117 -	for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
 118 +	for (regno = 0; regno < GDB_MAX_REGS; regno++)
 119  		gdb_regs[regno] = 0;
 120  
 121  	/* Otherwise, we have only some registers from switch_to() */
 122 diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
 123 index 1e4cbd4..64f6bc1 100644
 124 --- a/arch/arm/lib/findbit.S
 125 +++ b/arch/arm/lib/findbit.S
 126 @@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
 127   */
 128  .L_found:
 129  #if __LINUX_ARM_ARCH__ >= 5
 130 -		rsb	r1, r3, #0
 131 -		and	r3, r3, r1
 132 +		rsb	r0, r3, #0
 133 +		and	r3, r3, r0
 134  		clz	r3, r3
 135  		rsb	r3, r3, #31
 136  		add	r0, r2, r3
 137 @@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
 138  		addeq	r2, r2, #1
 139  		mov	r0, r2
 140  #endif
 141 +		cmp	r1, r0			@ Clamp to maxbit
 142 +		movlo	r0, r1
 143  		mov	pc, lr
 144  
 145 diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
 146 index 57f8ee1..27ac6f5 100644
 147 --- a/arch/arm/mach-at91/include/mach/at91_mci.h
 148 +++ b/arch/arm/mach-at91/include/mach/at91_mci.h
 149 @@ -74,6 +74,8 @@
 150  #define			AT91_MCI_TRTYP_BLOCK	(0 << 19)
 151  #define			AT91_MCI_TRTYP_MULTIPLE	(1 << 19)
 152  #define			AT91_MCI_TRTYP_STREAM	(2 << 19)
 153 +#define			AT91_MCI_TRTYP_SDIO_BYTE	(4 << 19)
 154 +#define			AT91_MCI_TRTYP_SDIO_BLOCK	(5 << 19)
 155  
 156  #define AT91_MCI_BLKR		0x18		/* Block Register */
 157  #define		AT91_MCI_BLKR_BCNT(n)	((0xffff & (n)) << 0)	/* Block count */
 158 diff --git a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
 159 index d16ce7e..9b50442 100644
 160 --- a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
 161 +++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
 162 @@ -10,7 +10,7 @@
 163   * published by the Free Software Foundation.
 164   */
 165  
 166 -		.macro	addruart,rx
 167 +		.macro	addruart,rx,rtmp
 168  		mrc	p15, 0, \rx, c1, c0
 169  		tst	\rx, #1			@ MMU enabled?
 170  		moveq	\rx,      #0x10000000
 171 diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
 172 index 38088c3..78defd7 100644
 173 --- a/arch/arm/mach-cns3xxx/pcie.c
 174 +++ b/arch/arm/mach-cns3xxx/pcie.c
 175 @@ -369,7 +369,7 @@ static int __init cns3xxx_pcie_init(void)
 176  {
 177  	int i;
 178  
 179 -	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS,
 180 +	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
 181  			"imprecise external abort");
 182  
 183  	for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
 184 diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
 185 index 86aa689..47010d8 100644
 186 --- a/arch/arm/mm/cache-v6.S
 187 +++ b/arch/arm/mm/cache-v6.S
 188 @@ -196,6 +196,10 @@ ENTRY(v6_flush_kern_dcache_area)
 189   *	- end     - virtual end address of region
 190   */
 191  v6_dma_inv_range:
 192 +#ifdef CONFIG_DMA_CACHE_RWFO
 193 +	ldrb	r2, [r0]			@ read for ownership
 194 +	strb	r2, [r0]			@ write for ownership
 195 +#endif
 196  	tst	r0, #D_CACHE_LINE_SIZE - 1
 197  	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 198  #ifdef HARVARD_CACHE
 199 @@ -204,6 +208,10 @@ v6_dma_inv_range:
 200  	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
 201  #endif
 202  	tst	r1, #D_CACHE_LINE_SIZE - 1
 203 +#ifdef CONFIG_DMA_CACHE_RWFO
 204 +	ldrneb	r2, [r1, #-1]			@ read for ownership
 205 +	strneb	r2, [r1, #-1]			@ write for ownership
 206 +#endif
 207  	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
 208  #ifdef HARVARD_CACHE
 209  	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
 210 @@ -211,10 +219,6 @@ v6_dma_inv_range:
 211  	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 212  #endif
 213  1:
 214 -#ifdef CONFIG_DMA_CACHE_RWFO
 215 -	ldr	r2, [r0]			@ read for ownership
 216 -	str	r2, [r0]			@ write for ownership
 217 -#endif
 218  #ifdef HARVARD_CACHE
 219  	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
 220  #else
 221 @@ -222,6 +226,10 @@ v6_dma_inv_range:
 222  #endif
 223  	add	r0, r0, #D_CACHE_LINE_SIZE
 224  	cmp	r0, r1
 225 +#ifdef CONFIG_DMA_CACHE_RWFO
 226 +	ldrlo	r2, [r0]			@ read for ownership
 227 +	strlo	r2, [r0]			@ write for ownership
 228 +#endif
 229  	blo	1b
 230  	mov	r0, #0
 231  	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 232 @@ -256,12 +264,12 @@ v6_dma_clean_range:
 233   *	- end     - virtual end address of region
 234   */
 235  ENTRY(v6_dma_flush_range)
 236 -	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 237 -1:
 238  #ifdef CONFIG_DMA_CACHE_RWFO
 239 -	ldr	r2, [r0]			@ read for ownership
 240 -	str	r2, [r0]			@ write for ownership
 241 +	ldrb	r2, [r0]		@ read for ownership
 242 +	strb	r2, [r0]		@ write for ownership
 243  #endif
 244 +	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 245 +1:
 246  #ifdef HARVARD_CACHE
 247  	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 248  #else
 249 @@ -269,6 +277,10 @@ ENTRY(v6_dma_flush_range)
 250  #endif
 251  	add	r0, r0, #D_CACHE_LINE_SIZE
 252  	cmp	r0, r1
 253 +#ifdef CONFIG_DMA_CACHE_RWFO
 254 +	ldrlob	r2, [r0]			@ read for ownership
 255 +	strlob	r2, [r0]			@ write for ownership
 256 +#endif
 257  	blo	1b
 258  	mov	r0, #0
 259  	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 260 diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
 261 index 9b906de..56036ff 100644
 262 --- a/arch/arm/mm/fault-armv.c
 263 +++ b/arch/arm/mm/fault-armv.c
 264 @@ -65,6 +65,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 265  	return ret;
 266  }
 267  
 268 +#if USE_SPLIT_PTLOCKS
 269 +/*
 270 + * If we are using split PTE locks, then we need to take the page
 271 + * lock here.  Otherwise we are using shared mm->page_table_lock
 272 + * which is already locked, thus cannot take it.
 273 + */
 274 +static inline void do_pte_lock(spinlock_t *ptl)
 275 +{
 276 +	/*
 277 +	 * Use nested version here to indicate that we are already
 278 +	 * holding one similar spinlock.
 279 +	 */
 280 +	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
 281 +}
 282 +
 283 +static inline void do_pte_unlock(spinlock_t *ptl)
 284 +{
 285 +	spin_unlock(ptl);
 286 +}
 287 +#else /* !USE_SPLIT_PTLOCKS */
 288 +static inline void do_pte_lock(spinlock_t *ptl) {}
 289 +static inline void do_pte_unlock(spinlock_t *ptl) {}
 290 +#endif /* USE_SPLIT_PTLOCKS */
 291 +
 292  static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 293  	unsigned long pfn)
 294  {
 295 @@ -89,11 +113,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 296  	 */
 297  	ptl = pte_lockptr(vma->vm_mm, pmd);
 298  	pte = pte_offset_map_nested(pmd, address);
 299 -	spin_lock(ptl);
 300 +	do_pte_lock(ptl);
 301  
 302  	ret = do_adjust_pte(vma, address, pfn, pte);
 303  
 304 -	spin_unlock(ptl);
 305 +	do_pte_unlock(ptl);
 306  	pte_unmap_nested(pte);
 307  
 308  	return ret;
 309 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
 310 index 7185b00..4e2f620 100644
 311 --- a/arch/arm/mm/init.c
 312 +++ b/arch/arm/mm/init.c
 313 @@ -282,6 +282,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 314  	memblock_reserve(__pa(_stext), _end - _stext);
 315  #endif
 316  #ifdef CONFIG_BLK_DEV_INITRD
 317 +	if (phys_initrd_size &&
 318 +	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
 319 +		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
 320 +		       phys_initrd_start, phys_initrd_size);
 321 +		phys_initrd_start = phys_initrd_size = 0;
 322 +	}
 323  	if (phys_initrd_size) {
 324  		memblock_reserve(phys_initrd_start, phys_initrd_size);
 325  
 326 diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
 327 index ec7eddf..f5c5b8d 100644
 328 --- a/arch/arm/plat-omap/dma.c
 329 +++ b/arch/arm/plat-omap/dma.c
 330 @@ -30,6 +30,7 @@
 331  #include <linux/irq.h>
 332  #include <linux/io.h>
 333  #include <linux/slab.h>
 334 +#include <linux/delay.h>
 335  
 336  #include <asm/system.h>
 337  #include <mach/hardware.h>
 338 @@ -996,11 +997,17 @@ void omap_start_dma(int lch)
 339  	l = dma_read(CCR(lch));
 340  
 341  	/*
 342 -	 * Errata: On ES2.0 BUFFERING disable must be set.
 343 -	 * This will always fail on ES1.0
 344 +	 * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
 345 +	 * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
 346 +	 * bursting is enabled. This might result in data gets stalled in
 347 +	 * FIFO at the end of the block.
 348 +	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
 349 +	 * guarantee no data will stay in the DMA FIFO in case inter frame
 350 +	 * buffering occurs.
 351  	 */
 352 -	if (cpu_is_omap24xx())
 353 -		l |= OMAP_DMA_CCR_EN;
 354 +	if (cpu_is_omap2420() ||
 355 +	    (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
 356 +		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 357  
 358  	l |= OMAP_DMA_CCR_EN;
 359  	dma_write(l, CCR(lch));
 360 @@ -1018,8 +1025,39 @@ void omap_stop_dma(int lch)
 361  		dma_write(0, CICR(lch));
 362  
 363  	l = dma_read(CCR(lch));
 364 -	l &= ~OMAP_DMA_CCR_EN;
 365 -	dma_write(l, CCR(lch));
 366 +	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
 367 +	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
 368 +		int i = 0;
 369 +		u32 sys_cf;
 370 +
 371 +		/* Configure No-Standby */
 372 +		l = dma_read(OCP_SYSCONFIG);
 373 +		sys_cf = l;
 374 +		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
 375 +		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
 376 +		dma_write(l , OCP_SYSCONFIG);
 377 +
 378 +		l = dma_read(CCR(lch));
 379 +		l &= ~OMAP_DMA_CCR_EN;
 380 +		dma_write(l, CCR(lch));
 381 +
 382 +		/* Wait for sDMA FIFO drain */
 383 +		l = dma_read(CCR(lch));
 384 +		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
 385 +					OMAP_DMA_CCR_WR_ACTIVE))) {
 386 +			udelay(5);
 387 +			i++;
 388 +			l = dma_read(CCR(lch));
 389 +		}
 390 +		if (i >= 100)
 391 +			printk(KERN_ERR "DMA drain did not complete on "
 392 +					"lch %d\n", lch);
 393 +		/* Restore OCP_SYSCONFIG */
 394 +		dma_write(sys_cf, OCP_SYSCONFIG);
 395 +	} else {
 396 +		l &= ~OMAP_DMA_CCR_EN;
 397 +		dma_write(l, CCR(lch));
 398 +	}
 399  
 400  	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 401  		int next_lch, cur_lch = lch;
 402 diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
 403 index af3a039..cf66f85 100644
 404 --- a/arch/arm/plat-omap/include/plat/dma.h
 405 +++ b/arch/arm/plat-omap/include/plat/dma.h
 406 @@ -335,6 +335,10 @@
 407  #define OMAP2_DMA_MISALIGNED_ERR_IRQ	(1 << 11)
 408  
 409  #define OMAP_DMA_CCR_EN			(1 << 7)
 410 +#define OMAP_DMA_CCR_RD_ACTIVE		(1 << 9)
 411 +#define OMAP_DMA_CCR_WR_ACTIVE		(1 << 10)
 412 +#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC	(1 << 24)
 413 +#define OMAP_DMA_CCR_BUFFERING_DISABLE	(1 << 25)
 414  
 415  #define OMAP_DMA_DATA_TYPE_S8		0x00
 416  #define OMAP_DMA_DATA_TYPE_S16		0x01
 417 diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
 418 index ab608b7..730a461 100644
 419 --- a/arch/avr32/include/asm/syscalls.h
 420 +++ b/arch/avr32/include/asm/syscalls.h
 421 @@ -16,18 +16,9 @@
 422  #include <linux/signal.h>
 423  
 424  /* kernel/process.c */
 425 -asmlinkage int sys_fork(struct pt_regs *);
 426  asmlinkage int sys_clone(unsigned long, unsigned long,
 427  			 unsigned long, unsigned long,
 428  			 struct pt_regs *);
 429 -asmlinkage int sys_vfork(struct pt_regs *);
 430 -asmlinkage int sys_execve(const char __user *, char __user *__user *,
 431 -			  char __user *__user *, struct pt_regs *);
 432 -
 433 -/* kernel/signal.c */
 434 -asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
 435 -			       struct pt_regs *);
 436 -asmlinkage int sys_rt_sigreturn(struct pt_regs *);
 437  
 438  /* mm/cache.c */
 439  asmlinkage int sys_cacheflush(int, void __user *, size_t);
 440 diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
 441 index 592c707..b07d990 100644
 442 --- a/arch/microblaze/Makefile
 443 +++ b/arch/microblaze/Makefile
 444 @@ -72,12 +72,16 @@ export MMU DTB
 445  
 446  all: linux.bin
 447  
 448 -BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
 449 +# With make 3.82 we cannot mix normal and wildcard targets
 450 +BOOT_TARGETS1 = linux.bin linux.bin.gz
 451 +BOOT_TARGETS2 = simpleImage.%
 452  
 453  archclean:
 454  	$(Q)$(MAKE) $(clean)=$(boot)
 455  
 456 -$(BOOT_TARGETS): vmlinux
 457 +$(BOOT_TARGETS1): vmlinux
 458 +	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 459 +$(BOOT_TARGETS2): vmlinux
 460  	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 461  
 462  define archhelp
 463 diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
 464 deleted file mode 120000
 465 index 7cb6578..0000000
 466 --- a/arch/microblaze/boot/dts/system.dts
 467 +++ /dev/null
 468 @@ -1 +0,0 @@
 469 -../../platform/generic/system.dts
 470 \ No newline at end of file
 471 diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
 472 new file mode 100644
 473 index 0000000..2d5c417
 474 --- /dev/null
 475 +++ b/arch/microblaze/boot/dts/system.dts
 476 @@ -0,0 +1,364 @@
 477 +/*
 478 + * Device Tree Generator version: 1.1
 479 + *
 480 + * (C) Copyright 2007-2008 Xilinx, Inc.
 481 + * (C) Copyright 2007-2009 Michal Simek
 482 + *
 483 + * Michal SIMEK <monstr@monstr.eu>
 484 + *
 485 + * This program is free software; you can redistribute it and/or
 486 + * modify it under the terms of the GNU General Public License as
 487 + * published by the Free Software Foundation; either version 2 of
 488 + * the License, or (at your option) any later version.
 489 + *
 490 + * This program is distributed in the hope that it will be useful,
 491 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 492 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 493 + * GNU General Public License for more details.
 494 + *
 495 + * You should have received a copy of the GNU General Public License
 496 + * along with this program; if not, write to the Free Software
 497 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 498 + * MA 02111-1307 USA
 499 + *
 500 + * CAUTION: This file is automatically generated by libgen.
 501 + * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
 502 + *
 503 + * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101
 504 + */
 505 +
 506 +/dts-v1/;
 507 +/ {
 508 +	#address-cells = <1>;
 509 +	#size-cells = <1>;
 510 +	compatible = "xlnx,microblaze";
 511 +	hard-reset-gpios = <&LEDs_8Bit 2 1>;
 512 +	model = "testing";
 513 +	DDR2_SDRAM: memory@90000000 {
 514 +		device_type = "memory";
 515 +		reg = < 0x90000000 0x10000000 >;
 516 +	} ;
 517 +	aliases {
 518 +		ethernet0 = &Hard_Ethernet_MAC;
 519 +		serial0 = &RS232_Uart_1;
 520 +	} ;
 521 +	chosen {
 522 +		bootargs = "console=ttyUL0,115200 highres=on";
 523 +		linux,stdout-path = "/plb@0/serial@84000000";
 524 +	} ;
 525 +	cpus {
 526 +		#address-cells = <1>;
 527 +		#cpus = <0x1>;
 528 +		#size-cells = <0>;
 529 +		microblaze_0: cpu@0 {
 530 +			clock-frequency = <125000000>;
 531 +			compatible = "xlnx,microblaze-7.10.d";
 532 +			d-cache-baseaddr = <0x90000000>;
 533 +			d-cache-highaddr = <0x9fffffff>;
 534 +			d-cache-line-size = <0x10>;
 535 +			d-cache-size = <0x2000>;
 536 +			device_type = "cpu";
 537 +			i-cache-baseaddr = <0x90000000>;
 538 +			i-cache-highaddr = <0x9fffffff>;
 539 +			i-cache-line-size = <0x10>;
 540 +			i-cache-size = <0x2000>;
 541 +			model = "microblaze,7.10.d";
 542 +			reg = <0>;
 543 +			timebase-frequency = <125000000>;
 544 +			xlnx,addr-tag-bits = <0xf>;
 545 +			xlnx,allow-dcache-wr = <0x1>;
 546 +			xlnx,allow-icache-wr = <0x1>;
 547 +			xlnx,area-optimized = <0x0>;
 548 +			xlnx,cache-byte-size = <0x2000>;
 549 +			xlnx,d-lmb = <0x1>;
 550 +			xlnx,d-opb = <0x0>;
 551 +			xlnx,d-plb = <0x1>;
 552 +			xlnx,data-size = <0x20>;
 553 +			xlnx,dcache-addr-tag = <0xf>;
 554 +			xlnx,dcache-always-used = <0x1>;
 555 +			xlnx,dcache-byte-size = <0x2000>;
 556 +			xlnx,dcache-line-len = <0x4>;
 557 +			xlnx,dcache-use-fsl = <0x1>;
 558 +			xlnx,debug-enabled = <0x1>;
 559 +			xlnx,div-zero-exception = <0x1>;
 560 +			xlnx,dopb-bus-exception = <0x0>;
 561 +			xlnx,dynamic-bus-sizing = <0x1>;
 562 +			xlnx,edge-is-positive = <0x1>;
 563 +			xlnx,family = "virtex5";
 564 +			xlnx,fpu-exception = <0x1>;
 565 +			xlnx,fsl-data-size = <0x20>;
 566 +			xlnx,fsl-exception = <0x0>;
 567 +			xlnx,fsl-links = <0x0>;
 568 +			xlnx,i-lmb = <0x1>;
 569 +			xlnx,i-opb = <0x0>;
 570 +			xlnx,i-plb = <0x1>;
 571 +			xlnx,icache-always-used = <0x1>;
 572 +			xlnx,icache-line-len = <0x4>;
 573 +			xlnx,icache-use-fsl = <0x1>;
 574 +			xlnx,ill-opcode-exception = <0x1>;
 575 +			xlnx,instance = "microblaze_0";
 576 +			xlnx,interconnect = <0x1>;
 577 +			xlnx,interrupt-is-edge = <0x0>;
 578 +			xlnx,iopb-bus-exception = <0x0>;
 579 +			xlnx,mmu-dtlb-size = <0x4>;
 580 +			xlnx,mmu-itlb-size = <0x2>;
 581 +			xlnx,mmu-tlb-access = <0x3>;
 582 +			xlnx,mmu-zones = <0x10>;
 583 +			xlnx,number-of-pc-brk = <0x1>;
 584 +			xlnx,number-of-rd-addr-brk = <0x0>;
 585 +			xlnx,number-of-wr-addr-brk = <0x0>;
 586 +			xlnx,opcode-0x0-illegal = <0x1>;
 587 +			xlnx,pvr = <0x2>;
 588 +			xlnx,pvr-user1 = <0x0>;
 589 +			xlnx,pvr-user2 = <0x0>;
 590 +			xlnx,reset-msr = <0x0>;
 591 +			xlnx,sco = <0x0>;
 592 +			xlnx,unaligned-exceptions = <0x1>;
 593 +			xlnx,use-barrel = <0x1>;
 594 +			xlnx,use-dcache = <0x1>;
 595 +			xlnx,use-div = <0x1>;
 596 +			xlnx,use-ext-brk = <0x1>;
 597 +			xlnx,use-ext-nm-brk = <0x1>;
 598 +			xlnx,use-extended-fsl-instr = <0x0>;
 599 +			xlnx,use-fpu = <0x2>;
 600 +			xlnx,use-hw-mul = <0x2>;
 601 +			xlnx,use-icache = <0x1>;
 602 +			xlnx,use-interrupt = <0x1>;
 603 +			xlnx,use-mmu = <0x3>;
 604 +			xlnx,use-msr-instr = <0x1>;
 605 +			xlnx,use-pcmp-instr = <0x1>;
 606 +		} ;
 607 +	} ;
 608 +	mb_plb: plb@0 {
 609 +		#address-cells = <1>;
 610 +		#size-cells = <1>;
 611 +		compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
 612 +		ranges ;
 613 +		FLASH: flash@a0000000 {
 614 +			bank-width = <2>;
 615 +			compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
 616 +			reg = < 0xa0000000 0x2000000 >;
 617 +			xlnx,family = "virtex5";
 618 +			xlnx,include-datawidth-matching-0 = <0x1>;
 619 +			xlnx,include-datawidth-matching-1 = <0x0>;
 620 +			xlnx,include-datawidth-matching-2 = <0x0>;
 621 +			xlnx,include-datawidth-matching-3 = <0x0>;
 622 +			xlnx,include-negedge-ioregs = <0x0>;
 623 +			xlnx,include-plb-ipif = <0x1>;
 624 +			xlnx,include-wrbuf = <0x1>;
 625 +			xlnx,max-mem-width = <0x10>;
 626 +			xlnx,mch-native-dwidth = <0x20>;
 627 +			xlnx,mch-plb-clk-period-ps = <0x1f40>;
 628 +			xlnx,mch-splb-awidth = <0x20>;
 629 +			xlnx,mch0-accessbuf-depth = <0x10>;
 630 +			xlnx,mch0-protocol = <0x0>;
 631 +			xlnx,mch0-rddatabuf-depth = <0x10>;
 632 +			xlnx,mch1-accessbuf-depth = <0x10>;
 633 +			xlnx,mch1-protocol = <0x0>;
 634 +			xlnx,mch1-rddatabuf-depth = <0x10>;
 635 +			xlnx,mch2-accessbuf-depth = <0x10>;
 636 +			xlnx,mch2-protocol = <0x0>;
 637 +			xlnx,mch2-rddatabuf-depth = <0x10>;
 638 +			xlnx,mch3-accessbuf-depth = <0x10>;
 639 +			xlnx,mch3-protocol = <0x0>;
 640 +			xlnx,mch3-rddatabuf-depth = <0x10>;
 641 +			xlnx,mem0-width = <0x10>;
 642 +			xlnx,mem1-width = <0x20>;
 643 +			xlnx,mem2-width = <0x20>;
 644 +			xlnx,mem3-width = <0x20>;
 645 +			xlnx,num-banks-mem = <0x1>;
 646 +			xlnx,num-channels = <0x0>;
 647 +			xlnx,priority-mode = <0x0>;
 648 +			xlnx,synch-mem-0 = <0x0>;
 649 +			xlnx,synch-mem-1 = <0x0>;
 650 +			xlnx,synch-mem-2 = <0x0>;
 651 +			xlnx,synch-mem-3 = <0x0>;
 652 +			xlnx,synch-pipedelay-0 = <0x2>;
 653 +			xlnx,synch-pipedelay-1 = <0x2>;
 654 +			xlnx,synch-pipedelay-2 = <0x2>;
 655 +			xlnx,synch-pipedelay-3 = <0x2>;
 656 +			xlnx,tavdv-ps-mem-0 = <0x1adb0>;
 657 +			xlnx,tavdv-ps-mem-1 = <0x3a98>;
 658 +			xlnx,tavdv-ps-mem-2 = <0x3a98>;
 659 +			xlnx,tavdv-ps-mem-3 = <0x3a98>;
 660 +			xlnx,tcedv-ps-mem-0 = <0x1adb0>;
 661 +			xlnx,tcedv-ps-mem-1 = <0x3a98>;
 662 +			xlnx,tcedv-ps-mem-2 = <0x3a98>;
 663 +			xlnx,tcedv-ps-mem-3 = <0x3a98>;
 664 +			xlnx,thzce-ps-mem-0 = <0x88b8>;
 665 +			xlnx,thzce-ps-mem-1 = <0x1b58>;
 666 +			xlnx,thzce-ps-mem-2 = <0x1b58>;
 667 +			xlnx,thzce-ps-mem-3 = <0x1b58>;
 668 +			xlnx,thzoe-ps-mem-0 = <0x1b58>;
 669 +			xlnx,thzoe-ps-mem-1 = <0x1b58>;
 670 +			xlnx,thzoe-ps-mem-2 = <0x1b58>;
 671 +			xlnx,thzoe-ps-mem-3 = <0x1b58>;
 672 +			xlnx,tlzwe-ps-mem-0 = <0x88b8>;
 673 +			xlnx,tlzwe-ps-mem-1 = <0x0>;
 674 +			xlnx,tlzwe-ps-mem-2 = <0x0>;
 675 +			xlnx,tlzwe-ps-mem-3 = <0x0>;
 676 +			xlnx,twc-ps-mem-0 = <0x2af8>;
 677 +			xlnx,twc-ps-mem-1 = <0x3a98>;
 678 +			xlnx,twc-ps-mem-2 = <0x3a98>;
 679 +			xlnx,twc-ps-mem-3 = <0x3a98>;
 680 +			xlnx,twp-ps-mem-0 = <0x11170>;
 681 +			xlnx,twp-ps-mem-1 = <0x2ee0>;
 682 +			xlnx,twp-ps-mem-2 = <0x2ee0>;
 683 +			xlnx,twp-ps-mem-3 = <0x2ee0>;
 684 +			xlnx,xcl0-linesize = <0x4>;
 685 +			xlnx,xcl0-writexfer = <0x1>;
 686 +			xlnx,xcl1-linesize = <0x4>;
 687 +			xlnx,xcl1-writexfer = <0x1>;
 688 +			xlnx,xcl2-linesize = <0x4>;
 689 +			xlnx,xcl2-writexfer = <0x1>;
 690 +			xlnx,xcl3-linesize = <0x4>;
 691 +			xlnx,xcl3-writexfer = <0x1>;
 692 +		} ;
 693 +		Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
 694 +			#address-cells = <1>;
 695 +			#size-cells = <1>;
 696 +			compatible = "xlnx,compound";
 697 +			ethernet@81c00000 {
 698 +				compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
 699 +				device_type = "network";
 700 +				interrupt-parent = <&xps_intc_0>;
 701 +				interrupts = < 5 2 >;
 702 +				llink-connected = <&PIM3>;
 703 +				local-mac-address = [ 00 0a 35 00 00 00 ];
 704 +				reg = < 0x81c00000 0x40 >;
 705 +				xlnx,bus2core-clk-ratio = <0x1>;
 706 +				xlnx,phy-type = <0x1>;
 707 +				xlnx,phyaddr = <0x1>;
 708 +				xlnx,rxcsum = <0x0>;
 709 +				xlnx,rxfifo = <0x1000>;
 710 +				xlnx,temac-type = <0x0>;
 711 +				xlnx,txcsum = <0x0>;
 712 +				xlnx,txfifo = <0x1000>;
 713 +			} ;
 714 +		} ;
 715 +		IIC_EEPROM: i2c@81600000 {
 716 +			compatible = "xlnx,xps-iic-2.00.a";
 717 +			interrupt-parent = <&xps_intc_0>;
 718 +			interrupts = < 6 2 >;
 719 +			reg = < 0x81600000 0x10000 >;
 720 +			xlnx,clk-freq = <0x7735940>;
 721 +			xlnx,family = "virtex5";
 722 +			xlnx,gpo-width = <0x1>;
 723 +			xlnx,iic-freq = <0x186a0>;
 724 +			xlnx,scl-inertial-delay = <0x0>;
 725 +			xlnx,sda-inertial-delay = <0x0>;
 726 +			xlnx,ten-bit-adr = <0x0>;
 727 +		} ;
 728 +		LEDs_8Bit: gpio@81400000 {
 729 +			compatible = "xlnx,xps-gpio-1.00.a";
 730 +			interrupt-parent = <&xps_intc_0>;
 731 +			interrupts = < 7 2 >;
 732 +			reg = < 0x81400000 0x10000 >;
 733 +			xlnx,all-inputs = <0x0>;
 734 +			xlnx,all-inputs-2 = <0x0>;
 735 +			xlnx,dout-default = <0x0>;
 736 +			xlnx,dout-default-2 = <0x0>;
 737 +			xlnx,family = "virtex5";
 738 +			xlnx,gpio-width = <0x8>;
 739 +			xlnx,interrupt-present = <0x1>;
 740 +			xlnx,is-bidir = <0x1>;
 741 +			xlnx,is-bidir-2 = <0x1>;
 742 +			xlnx,is-dual = <0x0>;
 743 +			xlnx,tri-default = <0xffffffff>;
 744 +			xlnx,tri-default-2 = <0xffffffff>;
 745 +			#gpio-cells = <2>;
 746 +			gpio-controller;
 747 +		} ;
 748 +
 749 +		gpio-leds {
 750 +			compatible = "gpio-leds";
 751 +
 752 +			heartbeat {
 753 +				label = "Heartbeat";
 754 +				gpios = <&LEDs_8Bit 4 1>;
 755 +				linux,default-trigger = "heartbeat";
 756 +			};
 757 +
 758 +			yellow {
 759 +				label = "Yellow";
 760 +				gpios = <&LEDs_8Bit 5 1>;
 761 +			};
 762 +
 763 +			red {
 764 +				label = "Red";
 765 +				gpios = <&LEDs_8Bit 6 1>;
 766 +			};
 767 +
 768 +			green {
 769 +				label = "Green";
 770 +				gpios = <&LEDs_8Bit 7 1>;
 771 +			};
 772 +		} ;
 773 +		RS232_Uart_1: serial@84000000 {
 774 +			clock-frequency = <125000000>;
 775 +			compatible = "xlnx,xps-uartlite-1.00.a";
 776 +			current-speed = <115200>;
 777 +			device_type = "serial";
 778 +			interrupt-parent = <&xps_intc_0>;
 779 +			interrupts = < 8 0 >;
 780 +			port-number = <0>;
 781 +			reg = < 0x84000000 0x10000 >;
 782 +			xlnx,baudrate = <0x1c200>;
 783 +			xlnx,data-bits = <0x8>;
 784 +			xlnx,family = "virtex5";
 785 +			xlnx,odd-parity = <0x0>;
 786 +			xlnx,use-parity = <0x0>;
 787 +		} ;
 788 +		SysACE_CompactFlash: sysace@83600000 {
 789 +			compatible = "xlnx,xps-sysace-1.00.a";
 790 +			interrupt-parent = <&xps_intc_0>;
 791 +			interrupts = < 4 2 >;
 792 +			reg = < 0x83600000 0x10000 >;
 793 +			xlnx,family = "virtex5";
 794 +			xlnx,mem-width = <0x10>;
 795 +		} ;
 796 +		debug_module: debug@84400000 {
 797 +			compatible = "xlnx,mdm-1.00.d";
 798 +			reg = < 0x84400000 0x10000 >;
 799 +			xlnx,family = "virtex5";
 800 +			xlnx,interconnect = <0x1>;
 801 +			xlnx,jtag-chain = <0x2>;
 802 +			xlnx,mb-dbg-ports = <0x1>;
 803 +			xlnx,uart-width = <0x8>;
 804 +			xlnx,use-uart = <0x1>;
 805 +			xlnx,write-fsl-ports = <0x0>;
 806 +		} ;
 807 +		mpmc@90000000 {
 808 +			#address-cells = <1>;
 809 +			#size-cells = <1>;
 810 +			compatible = "xlnx,mpmc-4.02.a";
 811 +			PIM3: sdma@84600180 {
 812 +				compatible = "xlnx,ll-dma-1.00.a";
 813 +				interrupt-parent = <&xps_intc_0>;
 814 +				interrupts = < 2 2 1 2 >;
 815 +				reg = < 0x84600180 0x80 >;
 816 +			} ;
 817 +		} ;
 818 +		xps_intc_0: interrupt-controller@81800000 {
 819 +			#interrupt-cells = <0x2>;
 820 +			compatible = "xlnx,xps-intc-1.00.a";
 821 +			interrupt-controller ;
 822 +			reg = < 0x81800000 0x10000 >;
 823 +			xlnx,kind-of-intr = <0x100>;
 824 +			xlnx,num-intr-inputs = <0x9>;
 825 +		} ;
 826 +		xps_timer_1: timer@83c00000 {
 827 +			compatible = "xlnx,xps-timer-1.00.a";
 828 +			interrupt-parent = <&xps_intc_0>;
 829 +			interrupts = < 3 2 >;
 830 +			reg = < 0x83c00000 0x10000 >;
 831 +			xlnx,count-width = <0x20>;
 832 +			xlnx,family = "virtex5";
 833 +			xlnx,gen0-assert = <0x1>;
 834 +			xlnx,gen1-assert = <0x1>;
 835 +			xlnx,one-timer-only = <0x0>;
 836 +			xlnx,trig0-assert = <0x1>;
 837 +			xlnx,trig1-assert = <0x1>;
 838 +		} ;
 839 +	} ;
 840 +}  ;
 841 diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
 842 index 5742bb4..5c0a357 100644
 843 --- a/arch/mips/jz4740/board-qi_lb60.c
 844 +++ b/arch/mips/jz4740/board-qi_lb60.c
 845 @@ -5,7 +5,7 @@
 846   *
 847   * Copyright (c) 2009 Qi Hardware inc.,
 848   * Author: Xiangfu Liu <xiangfu@qi-hardware.com>
 849 - * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de>
 850 + * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de>
 851   *
 852   * This program is free software; you can redistribute it and/or modify
 853   * it under the terms of the GNU General Public License version 2 or later
 854 @@ -235,7 +235,7 @@ static const unsigned int qi_lb60_keypad_rows[] = {
 855  	QI_LB60_GPIO_KEYIN(3),
 856  	QI_LB60_GPIO_KEYIN(4),
 857  	QI_LB60_GPIO_KEYIN(5),
 858 -	QI_LB60_GPIO_KEYIN(7),
 859 +	QI_LB60_GPIO_KEYIN(6),
 860  	QI_LB60_GPIO_KEYIN8,
 861  };
 862  
 863 diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
 864 index df971fa..4896ed0 100644
 865 --- a/arch/parisc/kernel/firmware.c
 866 +++ b/arch/parisc/kernel/firmware.c
 867 @@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
 868  	unsigned int i;
 869  	unsigned long flags;
 870  
 871 -	for (i = 0; i < count && i < 79;) {
 872 +	for (i = 0; i < count;) {
 873  		switch(str[i]) {
 874  		case '\n':
 875  			iodc_dbuf[i+0] = '\r';
 876  			iodc_dbuf[i+1] = '\n';
 877  			i += 2;
 878  			goto print;
 879 -		case '\b':	/* BS */
 880 -			i--; /* overwrite last */
 881  		default:
 882  			iodc_dbuf[i] = str[i];
 883  			i++;
 884 @@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
 885  		}
 886  	}
 887  
 888 -	/* if we're at the end of line, and not already inserting a newline,
 889 -	 * insert one anyway. iodc console doesn't claim to support >79 char
 890 -	 * lines. don't account for this in the return value.
 891 -	 */
 892 -	if (i == 79 && iodc_dbuf[i-1] != '\n') {
 893 -		iodc_dbuf[i+0] = '\r';
 894 -		iodc_dbuf[i+1] = '\n';
 895 -	}
 896 -
 897  print:
 898          spin_lock_irqsave(&pdc_lock, flags);
 899          real32_call(PAGE0->mem_cons.iodc_io,
 900 diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
 901 index 8bcb10b..d086e0f 100644
 902 --- a/arch/powerpc/boot/dts/p1022ds.dts
 903 +++ b/arch/powerpc/boot/dts/p1022ds.dts
 904 @@ -280,13 +280,13 @@
 905  			ranges = <0x0 0xc100 0x200>;
 906  			cell-index = <1>;
 907  			dma00: dma-channel@0 {
 908 -				compatible = "fsl,eloplus-dma-channel";
 909 +				compatible = "fsl,ssi-dma-channel";
 910  				reg = <0x0 0x80>;
 911  				cell-index = <0>;
 912  				interrupts = <76 2>;
 913  			};
 914  			dma01: dma-channel@80 {
 915 -				compatible = "fsl,eloplus-dma-channel";
 916 +				compatible = "fsl,ssi-dma-channel";
 917  				reg = <0x80 0x80>;
 918  				cell-index = <1>;
 919  				interrupts = <77 2>;
 920 diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
 921 index 55cba4a..f8cd9fb 100644
 922 --- a/arch/powerpc/kernel/cpu_setup_6xx.S
 923 +++ b/arch/powerpc/kernel/cpu_setup_6xx.S
 924 @@ -18,7 +18,7 @@
 925  #include <asm/mmu.h>
 926  
 927  _GLOBAL(__setup_cpu_603)
 928 -	mflr	r4
 929 +	mflr	r5
 930  BEGIN_MMU_FTR_SECTION
 931  	li	r10,0
 932  	mtspr	SPRN_SPRG_603_LRU,r10		/* init SW LRU tracking */
 933 @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
 934  	bl	__init_fpu_registers
 935  END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 936  	bl	setup_common_caches
 937 -	mtlr	r4
 938 +	mtlr	r5
 939  	blr
 940  _GLOBAL(__setup_cpu_604)
 941 -	mflr	r4
 942 +	mflr	r5
 943  	bl	setup_common_caches
 944  	bl	setup_604_hid0
 945 -	mtlr	r4
 946 +	mtlr	r5
 947  	blr
 948  _GLOBAL(__setup_cpu_750)
 949 -	mflr	r4
 950 +	mflr	r5
 951  	bl	__init_fpu_registers
 952  	bl	setup_common_caches
 953  	bl	setup_750_7400_hid0
 954 -	mtlr	r4
 955 +	mtlr	r5
 956  	blr
 957  _GLOBAL(__setup_cpu_750cx)
 958 -	mflr	r4
 959 +	mflr	r5
 960  	bl	__init_fpu_registers
 961  	bl	setup_common_caches
 962  	bl	setup_750_7400_hid0
 963  	bl	setup_750cx
 964 -	mtlr	r4
 965 +	mtlr	r5
 966  	blr
 967  _GLOBAL(__setup_cpu_750fx)
 968 -	mflr	r4
 969 +	mflr	r5
 970  	bl	__init_fpu_registers
 971  	bl	setup_common_caches
 972  	bl	setup_750_7400_hid0
 973  	bl	setup_750fx
 974 -	mtlr	r4
 975 +	mtlr	r5
 976  	blr
 977  _GLOBAL(__setup_cpu_7400)
 978 -	mflr	r4
 979 +	mflr	r5
 980  	bl	__init_fpu_registers
 981  	bl	setup_7400_workarounds
 982  	bl	setup_common_caches
 983  	bl	setup_750_7400_hid0
 984 -	mtlr	r4
 985 +	mtlr	r5
 986  	blr
 987  _GLOBAL(__setup_cpu_7410)
 988 -	mflr	r4
 989 +	mflr	r5
 990  	bl	__init_fpu_registers
 991  	bl	setup_7410_workarounds
 992  	bl	setup_common_caches
 993  	bl	setup_750_7400_hid0
 994  	li	r3,0
 995  	mtspr	SPRN_L2CR2,r3
 996 -	mtlr	r4
 997 +	mtlr	r5
 998  	blr
 999  _GLOBAL(__setup_cpu_745x)
1000 -	mflr	r4
1001 +	mflr	r5
1002  	bl	setup_common_caches
1003  	bl	setup_745x_specifics
1004 -	mtlr	r4
1005 +	mtlr	r5
1006  	blr
1007  
1008  /* Enable caches for 603's, 604, 750 & 7400 */
1009 @@ -194,10 +194,10 @@ setup_750cx:
1010  	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
1011  	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
1012  	bnelr
1013 -	lwz	r6,CPU_SPEC_FEATURES(r5)
1014 +	lwz	r6,CPU_SPEC_FEATURES(r4)
1015  	li	r7,CPU_FTR_CAN_NAP
1016  	andc	r6,r6,r7
1017 -	stw	r6,CPU_SPEC_FEATURES(r5)
1018 +	stw	r6,CPU_SPEC_FEATURES(r4)
1019  	blr
1020  
1021  /* 750fx specific
1022 @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
1023  	andis.	r11,r11,L3CR_L3E@h
1024  	beq	1f
1025  END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
1026 -	lwz	r6,CPU_SPEC_FEATURES(r5)
1027 +	lwz	r6,CPU_SPEC_FEATURES(r4)
1028  	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
1029  	beq	1f
1030  	li	r7,CPU_FTR_CAN_NAP
1031  	andc	r6,r6,r7
1032 -	stw	r6,CPU_SPEC_FEATURES(r5)
1033 +	stw	r6,CPU_SPEC_FEATURES(r4)
1034  1:
1035  	mfspr	r11,SPRN_HID0
1036  
1037 diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
1038 index 8eff48e..3fee685 100644
1039 --- a/arch/powerpc/kernel/ppc970-pmu.c
1040 +++ b/arch/powerpc/kernel/ppc970-pmu.c
1041 @@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event)
1042  	switch (unit) {
1043  	case PM_VPU:
1044  		mask = 0x4c;		/* byte 0 bits 2,3,6 */
1045 +		break;
1046  	case PM_LSU0:
1047  		/* byte 2 bits 0,2,3,4,6; all of byte 1 */
1048  		mask = 0x085dff00;
1049 +		break;
1050  	case PM_LSU1L:
1051  		mask = 0x50 << 24;	/* byte 3 bits 4,6 */
1052  		break;
1053 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
1054 index 09dffe6..1eb64ba 100644
1055 --- a/arch/powerpc/mm/hash_utils_64.c
1056 +++ b/arch/powerpc/mm/hash_utils_64.c
1057 @@ -1122,7 +1122,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1058  	else
1059  #endif /* CONFIG_PPC_HAS_HASH_64K */
1060  		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
1061 -				    subpage_protection(pgdir, ea));
1062 +				    subpage_protection(mm, ea));
1063  
1064  	/* Dump some info in case of hash insertion failure, they should
1065  	 * never happen so it is really useful to know if/when they do
1066 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1067 index 002878c..1537ab3 100644
1068 --- a/arch/powerpc/mm/numa.c
1069 +++ b/arch/powerpc/mm/numa.c
1070 @@ -181,7 +181,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
1071  	dbg("removing cpu %lu from node %d\n", cpu, node);
1072  
1073  	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
1074 -		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
1075 +		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
1076  	} else {
1077  		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
1078  		       cpu, node);
1079 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
1080 index cf79b46..568b503 100644
1081 --- a/arch/powerpc/platforms/pseries/lpar.c
1082 +++ b/arch/powerpc/platforms/pseries/lpar.c
1083 @@ -680,6 +680,13 @@ EXPORT_SYMBOL(arch_free_page);
1084  /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
1085  extern long hcall_tracepoint_refcount;
1086  
1087 +/*
1088 + * Since the tracing code might execute hcalls we need to guard against
1089 + * recursion. One example of this are spinlocks calling H_YIELD on
1090 + * shared processor partitions.
1091 + */
1092 +static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
1093 +
1094  void hcall_tracepoint_regfunc(void)
1095  {
1096  	hcall_tracepoint_refcount++;
1097 @@ -692,12 +699,42 @@ void hcall_tracepoint_unregfunc(void)
1098  
1099  void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
1100  {
1101 +	unsigned long flags;
1102 +	unsigned int *depth;
1103 +
1104 +	local_irq_save(flags);
1105 +
1106 +	depth = &__get_cpu_var(hcall_trace_depth);
1107 +
1108 +	if (*depth)
1109 +		goto out;
1110 +
1111 +	(*depth)++;
1112  	trace_hcall_entry(opcode, args);
1113 +	(*depth)--;
1114 +
1115 +out:
1116 +	local_irq_restore(flags);
1117  }
1118  
1119  void __trace_hcall_exit(long opcode, unsigned long retval,
1120  			unsigned long *retbuf)
1121  {
1122 +	unsigned long flags;
1123 +	unsigned int *depth;
1124 +
1125 +	local_irq_save(flags);
1126 +
1127 +	depth = &__get_cpu_var(hcall_trace_depth);
1128 +
1129 +	if (*depth)
1130 +		goto out;
1131 +
1132 +	(*depth)++;
1133  	trace_hcall_exit(opcode, retval, retbuf);
1134 +	(*depth)--;
1135 +
1136 +out:
1137 +	local_irq_restore(flags);
1138  }
1139  #endif
1140 diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
1141 index 3017532..b7f02a4 100644
1142 --- a/arch/powerpc/sysdev/fsl_rio.c
1143 +++ b/arch/powerpc/sysdev/fsl_rio.c
1144 @@ -954,7 +954,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
1145  	if (dsr & DOORBELL_DSR_QFI) {
1146  		pr_info("RIO: doorbell queue full\n");
1147  		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
1148 -		goto out;
1149  	}
1150  
1151  	/* XXX Need to check/dispatch until queue empty */
1152 diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
1153 index ac15139..1995c17 100644
1154 --- a/arch/s390/kernel/nmi.c
1155 +++ b/arch/s390/kernel/nmi.c
1156 @@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
1157  static int notrace s390_revalidate_registers(struct mci *mci)
1158  {
1159  	int kill_task;
1160 -	u64 tmpclock;
1161  	u64 zero;
1162  	void *fpt_save_area, *fpt_creg_save_area;
1163  
1164 @@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
1165  			: "0", "cc");
1166  #endif
1167  	/* Revalidate clock comparator register */
1168 -	asm volatile(
1169 -		"	stck	0(%1)\n"
1170 -		"	sckc	0(%1)"
1171 -		: "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
1172 -
1173 +	if (S390_lowcore.clock_comparator == -1)
1174 +		set_clock_comparator(S390_lowcore.mcck_clock);
1175 +	else
1176 +		set_clock_comparator(S390_lowcore.clock_comparator);
1177  	/* Check if old PSW is valid */
1178  	if (!mci->wp)
1179  		/*
1180 diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
1181 index 3479f1b..c1e326c 100644
1182 --- a/arch/s390/kernel/vtime.c
1183 +++ b/arch/s390/kernel/vtime.c
1184 @@ -19,6 +19,7 @@
1185  #include <linux/kernel_stat.h>
1186  #include <linux/rcupdate.h>
1187  #include <linux/posix-timers.h>
1188 +#include <linux/cpu.h>
1189  
1190  #include <asm/s390_ext.h>
1191  #include <asm/timer.h>
1192 @@ -565,6 +566,23 @@ void init_cpu_vtimer(void)
1193  	__ctl_set_bit(0,10);
1194  }
1195  
1196 +static int __cpuinit s390_nohz_notify(struct notifier_block *self,
1197 +				      unsigned long action, void *hcpu)
1198 +{
1199 +	struct s390_idle_data *idle;
1200 +	long cpu = (long) hcpu;
1201 +
1202 +	idle = &per_cpu(s390_idle, cpu);
1203 +	switch (action) {
1204 +	case CPU_DYING:
1205 +	case CPU_DYING_FROZEN:
1206 +		idle->nohz_delay = 0;
1207 +	default:
1208 +		break;
1209 +	}
1210 +	return NOTIFY_OK;
1211 +}
1212 +
1213  void __init vtime_init(void)
1214  {
1215  	/* request the cpu timer external interrupt */
1216 @@ -573,5 +591,6 @@ void __init vtime_init(void)
1217  
1218  	/* Enable cpu timer interrupts on the boot cpu. */
1219  	init_cpu_vtimer();
1220 +	cpu_notifier(s390_nohz_notify, 0);
1221  }
1222  
1223 diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
1224 index 752b362..7c37ec3 100644
1225 --- a/arch/s390/lib/delay.c
1226 +++ b/arch/s390/lib/delay.c
1227 @@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
1228  {
1229  	unsigned long mask, cr0, cr0_saved;
1230  	u64 clock_saved;
1231 +	u64 end;
1232  
1233 +	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
1234 +	end = get_clock() + (usecs << 12);
1235  	clock_saved = local_tick_disable();
1236 -	set_clock_comparator(get_clock() + (usecs << 12));
1237  	__ctl_store(cr0_saved, 0, 0);
1238  	cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
1239  	__ctl_load(cr0 , 0, 0);
1240 -	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
1241  	lockdep_off();
1242 -	trace_hardirqs_on();
1243 -	__load_psw_mask(mask);
1244 -	local_irq_disable();
1245 +	do {
1246 +		set_clock_comparator(end);
1247 +		trace_hardirqs_on();
1248 +		__load_psw_mask(mask);
1249 +		local_irq_disable();
1250 +	} while (get_clock() < end);
1251  	lockdep_on();
1252  	__ctl_load(cr0_saved, 0, 0);
1253  	local_tick_enable(clock_saved);
1254 diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
1255 index b237d52..34ba197 100644
1256 --- a/arch/sh/include/asm/io.h
1257 +++ b/arch/sh/include/asm/io.h
1258 @@ -322,7 +322,15 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
1259  	 * mapping must be done by the PMB or by using page tables.
1260  	 */
1261  	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
1262 -		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
1263 +		u64 flags = pgprot_val(prot);
1264 +
1265 +		/*
1266 +		 * Anything using the legacy PTEA space attributes needs
1267 +		 * to be kicked down to page table mappings.
1268 +		 */
1269 +		if (unlikely(flags & _PAGE_PCC_MASK))
1270 +			return NULL;
1271 +		if (unlikely(flags & _PAGE_CACHABLE))
1272  			return (void __iomem *)P1SEGADDR(offset);
1273  
1274  		return (void __iomem *)P2SEGADDR(offset);
1275 diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
1276 index e172d69..45743bf 100644
1277 --- a/arch/sh/include/asm/pgtable_32.h
1278 +++ b/arch/sh/include/asm/pgtable_32.h
1279 @@ -76,6 +76,10 @@
1280  /* Wrapper for extended mode pgprot twiddling */
1281  #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
1282  
1283 +#ifdef CONFIG_X2TLB
1284 +#define _PAGE_PCC_MASK	0x00000000	/* No legacy PTEA support */
1285 +#else
1286 +
1287  /* software: moves to PTEA.TC (Timing Control) */
1288  #define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
1289  #define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
1290 @@ -89,7 +93,8 @@
1291  #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
1292  #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 6 bit bus */
1293  
1294 -#ifndef CONFIG_X2TLB
1295 +#define _PAGE_PCC_MASK	0xe0000001
1296 +
1297  /* copy the ptea attributes */
1298  static inline unsigned long copy_ptea_attributes(unsigned long x)
1299  {
1300 diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
1301 index be201fd..ae717e3 100644
1302 --- a/arch/sh/include/asm/syscalls_32.h
1303 +++ b/arch/sh/include/asm/syscalls_32.h
1304 @@ -19,9 +19,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
1305  asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
1306  			 unsigned long r6, unsigned long r7,
1307  			 struct pt_regs __regs);
1308 -asmlinkage int sys_execve(const char __user *ufilename, char __user * __user *uargv,
1309 -			  char __user * __user *uenvp, unsigned long r7,
1310 -			  struct pt_regs __regs);
1311 +asmlinkage int sys_execve(const char __user *ufilename,
1312 +			  const char __user *const __user *uargv,
1313 +			  const char __user *const __user *uenvp,
1314 +			  unsigned long r7, struct pt_regs __regs);
1315  asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5,
1316  			      unsigned long r6, unsigned long r7,
1317  			      struct pt_regs __regs);
1318 diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h
1319 index 963e1a4..f61a501 100644
1320 --- a/arch/sparc/include/asm/openprom.h
1321 +++ b/arch/sparc/include/asm/openprom.h
1322 @@ -37,7 +37,7 @@ struct linux_dev_v2_funcs {
1323  	int (*v2_dev_open)(char *devpath);
1324  	void (*v2_dev_close)(int d);
1325  	int (*v2_dev_read)(int d, char *buf, int nbytes);
1326 -	int (*v2_dev_write)(int d, char *buf, int nbytes);
1327 +	int (*v2_dev_write)(int d, const char *buf, int nbytes);
1328  	int (*v2_dev_seek)(int d, int hi, int lo);
1329  
1330  	/* Never issued (multistage load support) */
1331 diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
1332 index 33e31ce..618a5bd 100644
1333 --- a/arch/sparc/include/asm/oplib_32.h
1334 +++ b/arch/sparc/include/asm/oplib_32.h
1335 @@ -60,25 +60,6 @@ extern char *prom_getbootargs(void);
1336  extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
1337  extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
1338  
1339 -/* Device operations. */
1340 -
1341 -/* Open the device described by the passed string.  Note, that the format
1342 - * of the string is different on V0 vs. V2->higher proms.  The caller must
1343 - * know what he/she is doing!  Returns the device descriptor, an int.
1344 - */
1345 -extern int prom_devopen(char *device_string);
1346 -
1347 -/* Close a previously opened device described by the passed integer
1348 - * descriptor.
1349 - */
1350 -extern int prom_devclose(int device_handle);
1351 -
1352 -/* Do a seek operation on the device described by the passed integer
1353 - * descriptor.
1354 - */
1355 -extern void prom_seek(int device_handle, unsigned int seek_hival,
1356 -		      unsigned int seek_lowval);
1357 -
1358  /* Miscellaneous routines, don't really fit in any category per se. */
1359  
1360  /* Reboot the machine with the command line passed. */
1361 @@ -121,19 +102,8 @@ extern int prom_getrev(void);
1362  /* Get the prom firmware revision. */
1363  extern int prom_getprev(void);
1364  
1365 -/* Character operations to/from the console.... */
1366 -
1367 -/* Non-blocking get character from console. */
1368 -extern int prom_nbgetchar(void);
1369 -
1370 -/* Non-blocking put character to console. */
1371 -extern int prom_nbputchar(char character);
1372 -
1373 -/* Blocking get character from console. */
1374 -extern char prom_getchar(void);
1375 -
1376 -/* Blocking put character to console. */
1377 -extern void prom_putchar(char character);
1378 +/* Write a buffer of characters to the console. */
1379 +extern void prom_console_write_buf(const char *buf, int len);
1380  
1381  /* Prom's internal routines, don't use in kernel/boot code. */
1382  extern void prom_printf(const char *fmt, ...);
1383 @@ -238,7 +208,6 @@ extern int prom_node_has_property(int node, char *property);
1384  extern int prom_setprop(int node, const char *prop_name, char *prop_value,
1385  			int value_size);
1386  
1387 -extern int prom_pathtoinode(char *path);
1388  extern int prom_inst2pkg(int);
1389  
1390  /* Dorking with Bus ranges... */
1391 diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
1392 index 3e0b2d6..209463d 100644
1393 --- a/arch/sparc/include/asm/oplib_64.h
1394 +++ b/arch/sparc/include/asm/oplib_64.h
1395 @@ -67,27 +67,6 @@ extern void prom_init(void *cif_handler, void *cif_stack);
1396  /* Boot argument acquisition, returns the boot command line string. */
1397  extern char *prom_getbootargs(void);
1398  
1399 -/* Device utilities. */
1400 -
1401 -/* Device operations. */
1402 -
1403 -/* Open the device described by the passed string.  Note, that the format
1404 - * of the string is different on V0 vs. V2->higher proms.  The caller must
1405 - * know what he/she is doing!  Returns the device descriptor, an int.
1406 - */
1407 -extern int prom_devopen(const char *device_string);
1408 -
1409 -/* Close a previously opened device described by the passed integer
1410 - * descriptor.
1411 - */
1412 -extern int prom_devclose(int device_handle);
1413 -
1414 -/* Do a seek operation on the device described by the passed integer
1415 - * descriptor.
1416 - */
1417 -extern void prom_seek(int device_handle, unsigned int seek_hival,
1418 -		      unsigned int seek_lowval);
1419 -
1420  /* Miscellaneous routines, don't really fit in any category per se. */
1421  
1422  /* Reboot the machine with the command line passed. */
1423 @@ -109,33 +88,14 @@ extern void prom_halt(void) __attribute__ ((noreturn));
1424  /* Halt and power-off the machine. */
1425  extern void prom_halt_power_off(void) __attribute__ ((noreturn));
1426  
1427 -/* Set the PROM 'sync' callback function to the passed function pointer.
1428 - * When the user gives the 'sync' command at the prom prompt while the
1429 - * kernel is still active, the prom will call this routine.
1430 - *
1431 - */
1432 -typedef int (*callback_func_t)(long *cmd);
1433 -extern void prom_setcallback(callback_func_t func_ptr);
1434 -
1435  /* Acquire the IDPROM of the root node in the prom device tree.  This
1436   * gets passed a buffer where you would like it stuffed.  The return value
1437   * is the format type of this idprom or 0xff on error.
1438   */
1439  extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
1440  
1441 -/* Character operations to/from the console.... */
1442 -
1443 -/* Non-blocking get character from console. */
1444 -extern int prom_nbgetchar(void);
1445 -
1446 -/* Non-blocking put character to console. */
1447 -extern int prom_nbputchar(char character);
1448 -
1449 -/* Blocking get character from console. */
1450 -extern char prom_getchar(void);
1451 -
1452 -/* Blocking put character to console. */
1453 -extern void prom_putchar(char character);
1454 +/* Write a buffer of characters to the console. */
1455 +extern void prom_console_write_buf(const char *buf, int len);
1456  
1457  /* Prom's internal routines, don't use in kernel/boot code. */
1458  extern void prom_printf(const char *fmt, ...);
1459 @@ -278,9 +238,7 @@ extern int prom_finddevice(const char *name);
1460  extern int prom_setprop(int node, const char *prop_name, char *prop_value,
1461  			int value_size);
1462  
1463 -extern int prom_pathtoinode(const char *path);
1464  extern int prom_inst2pkg(int);
1465 -extern int prom_service_exists(const char *service_name);
1466  extern void prom_sun4v_guest_soft_state(void);
1467  
1468  extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
1469 diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
1470 index 6a7b4db..dcefd22 100644
1471 --- a/arch/sparc/kernel/leon_kernel.c
1472 +++ b/arch/sparc/kernel/leon_kernel.c
1473 @@ -114,7 +114,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
1474  	if (leon3_gptimer_regs && leon3_irqctrl_regs) {
1475  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0);
1476  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld,
1477 -				      (((1000000 / 100) - 1)));
1478 +				      (((1000000 / HZ) - 1)));
1479  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0);
1480  
1481  #ifdef CONFIG_SMP
1482 @@ -128,7 +128,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
1483  		}
1484  
1485  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0);
1486 -		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1)));
1487 +		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1)));
1488  		LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0);
1489  # endif
1490  
1491 diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
1492 index 1b8c073..816c0fa 100644
1493 --- a/arch/sparc/prom/Makefile
1494 +++ b/arch/sparc/prom/Makefile
1495 @@ -6,7 +6,6 @@ ccflags := -Werror
1496  
1497  lib-y                 := bootstr_$(BITS).o
1498  lib-$(CONFIG_SPARC32) += devmap.o
1499 -lib-y                 += devops_$(BITS).o
1500  lib-y                 += init_$(BITS).o
1501  lib-$(CONFIG_SPARC32) += memory.o
1502  lib-y                 += misc_$(BITS).o
1503 diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
1504 index 5340264..4886310 100644
1505 --- a/arch/sparc/prom/console_32.c
1506 +++ b/arch/sparc/prom/console_32.c
1507 @@ -16,63 +16,26 @@
1508  
1509  extern void restore_current(void);
1510  
1511 -/* Non blocking get character from console input device, returns -1
1512 - * if no input was taken.  This can be used for polling.
1513 - */
1514 -int
1515 -prom_nbgetchar(void)
1516 -{
1517 -	static char inc;
1518 -	int i = -1;
1519 -	unsigned long flags;
1520 -
1521 -	spin_lock_irqsave(&prom_lock, flags);
1522 -	switch(prom_vers) {
1523 -	case PROM_V0:
1524 -		i = (*(romvec->pv_nbgetchar))();
1525 -		break;
1526 -	case PROM_V2:
1527 -	case PROM_V3:
1528 -		if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) {
1529 -			i = inc;
1530 -		} else {
1531 -			i = -1;
1532 -		}
1533 -		break;
1534 -	default:
1535 -		i = -1;
1536 -		break;
1537 -	};
1538 -	restore_current();
1539 -	spin_unlock_irqrestore(&prom_lock, flags);
1540 -	return i; /* Ugh, we could spin forever on unsupported proms ;( */
1541 -}
1542 -
1543  /* Non blocking put character to console device, returns -1 if
1544   * unsuccessful.
1545   */
1546 -int
1547 -prom_nbputchar(char c)
1548 +static int prom_nbputchar(const char *buf)
1549  {
1550 -	static char outc;
1551  	unsigned long flags;
1552  	int i = -1;
1553  
1554  	spin_lock_irqsave(&prom_lock, flags);
1555  	switch(prom_vers) {
1556  	case PROM_V0:
1557 -		i = (*(romvec->pv_nbputchar))(c);
1558 +		i = (*(romvec->pv_nbputchar))(*buf);
1559  		break;
1560  	case PROM_V2:
1561  	case PROM_V3:
1562 -		outc = c;
1563 -		if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1)
1564 +		if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout,
1565 +							  buf, 0x1) == 1)
1566  			i = 0;
1567 -		else
1568 -			i = -1;
1569  		break;
1570  	default:
1571 -		i = -1;
1572  		break;
1573  	};
1574  	restore_current();
1575 @@ -80,18 +43,14 @@ prom_nbputchar(char c)
1576  	return i; /* Ugh, we could spin forever on unsupported proms ;( */
1577  }
1578  
1579 -/* Blocking version of get character routine above. */
1580 -char
1581 -prom_getchar(void)
1582 +void prom_console_write_buf(const char *buf, int len)
1583  {
1584 -	int character;
1585 -	while((character = prom_nbgetchar()) == -1) ;
1586 -	return (char) character;
1587 +	while (len) {
1588 +		int n = prom_nbputchar(buf);
1589 +		if (n)
1590 +			continue;
1591 +		len--;
1592 +		buf++;
1593 +	}
1594  }
1595  
1596 -/* Blocking version of put character routine above. */
1597 -void
1598 -prom_putchar(char c)
1599 -{
1600 -	while(prom_nbputchar(c) == -1) ;
1601 -}
1602 diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c
1603 index 10322dc..ed39e75 100644
1604 --- a/arch/sparc/prom/console_64.c
1605 +++ b/arch/sparc/prom/console_64.c
1606 @@ -15,85 +15,34 @@
1607  
1608  extern int prom_stdin, prom_stdout;
1609  
1610 -/* Non blocking get character from console input device, returns -1
1611 - * if no input was taken.  This can be used for polling.
1612 - */
1613 -inline int
1614 -prom_nbgetchar(void)
1615 -{
1616 -	unsigned long args[7];
1617 -	char inc;
1618 -
1619 -	args[0] = (unsigned long) "read";
1620 -	args[1] = 3;
1621 -	args[2] = 1;
1622 -	args[3] = (unsigned int) prom_stdin;
1623 -	args[4] = (unsigned long) &inc;
1624 -	args[5] = 1;
1625 -	args[6] = (unsigned long) -1;
1626 -
1627 -	p1275_cmd_direct(args);
1628 -
1629 -	if (args[6] == 1)
1630 -		return inc;
1631 -	return -1;
1632 -}
1633 -
1634 -/* Non blocking put character to console device, returns -1 if
1635 - * unsuccessful.
1636 - */
1637 -inline int
1638 -prom_nbputchar(char c)
1639 +static int __prom_console_write_buf(const char *buf, int len)
1640  {
1641  	unsigned long args[7];
1642 -	char outc;
1643 -	
1644 -	outc = c;
1645 +	int ret;
1646  
1647  	args[0] = (unsigned long) "write";
1648  	args[1] = 3;
1649  	args[2] = 1;
1650  	args[3] = (unsigned int) prom_stdout;
1651 -	args[4] = (unsigned long) &outc;
1652 -	args[5] = 1;
1653 +	args[4] = (unsigned long) buf;
1654 +	args[5] = (unsigned int) len;
1655  	args[6] = (unsigned long) -1;
1656  
1657  	p1275_cmd_direct(args);
1658  
1659 -	if (args[6] == 1)
1660 -		return 0;
1661 -	else
1662 +	ret = (int) args[6];
1663 +	if (ret < 0)
1664  		return -1;
1665 +	return ret;
1666  }
1667  
1668 -/* Blocking version of get character routine above. */
1669 -char
1670 -prom_getchar(void)
1671 -{
1672 -	int character;
1673 -	while((character = prom_nbgetchar()) == -1) ;
1674 -	return (char) character;
1675 -}
1676 -
1677 -/* Blocking version of put character routine above. */
1678 -void
1679 -prom_putchar(char c)
1680 +void prom_console_write_buf(const char *buf, int len)
1681  {
1682 -	prom_nbputchar(c);
1683 -}
1684 -
1685 -void
1686 -prom_puts(const char *s, int len)
1687 -{
1688 -	unsigned long args[7];
1689 -
1690 -	args[0] = (unsigned long) "write";
1691 -	args[1] = 3;
1692 -	args[2] = 1;
1693 -	args[3] = (unsigned int) prom_stdout;
1694 -	args[4] = (unsigned long) s;
1695 -	args[5] = len;
1696 -	args[6] = (unsigned long) -1;
1697 -
1698 -	p1275_cmd_direct(args);
1699 +	while (len) {
1700 +		int n = __prom_console_write_buf(buf, len);
1701 +		if (n < 0)
1702 +			continue;
1703 +		len -= n;
1704 +		buf += len;
1705 +	}
1706  }
1707 diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
1708 index 6cb1581..2fdcebf 100644
1709 --- a/arch/sparc/prom/misc_64.c
1710 +++ b/arch/sparc/prom/misc_64.c
1711 @@ -18,7 +18,7 @@
1712  #include <asm/system.h>
1713  #include <asm/ldc.h>
1714  
1715 -int prom_service_exists(const char *service_name)
1716 +static int prom_service_exists(const char *service_name)
1717  {
1718  	unsigned long args[5];
1719  
1720 @@ -150,20 +150,6 @@ void prom_halt_power_off(void)
1721  	prom_halt();
1722  }
1723  
1724 -/* Set prom sync handler to call function 'funcp'. */
1725 -void prom_setcallback(callback_func_t funcp)
1726 -{
1727 -	unsigned long args[5];
1728 -	if (!funcp)
1729 -		return;
1730 -	args[0] = (unsigned long) "set-callback";
1731 -	args[1] = 1;
1732 -	args[2] = 1;
1733 -	args[3] = (unsigned long) funcp;
1734 -	args[4] = (unsigned long) -1;
1735 -	p1275_cmd_direct(args);
1736 -}
1737 -
1738  /* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
1739   * format type.  'num_bytes' is the number of bytes that your idbuf
1740   * has space for.  Returns 0xff on error.
1741 diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
1742 index ca86926..d9682f0 100644
1743 --- a/arch/sparc/prom/printf.c
1744 +++ b/arch/sparc/prom/printf.c
1745 @@ -15,22 +15,45 @@
1746  
1747  #include <linux/kernel.h>
1748  #include <linux/compiler.h>
1749 +#include <linux/spinlock.h>
1750  
1751  #include <asm/openprom.h>
1752  #include <asm/oplib.h>
1753  
1754 +#define CONSOLE_WRITE_BUF_SIZE	1024
1755 +
1756  static char ppbuf[1024];
1757 +static char console_write_buf[CONSOLE_WRITE_BUF_SIZE];
1758 +static DEFINE_RAW_SPINLOCK(console_write_lock);
1759  
1760  void notrace prom_write(const char *buf, unsigned int n)
1761  {
1762 -	char ch;
1763 +	unsigned int dest_len;
1764 +	unsigned long flags;
1765 +	char *dest;
1766 +
1767 +	dest = console_write_buf;
1768 +	raw_spin_lock_irqsave(&console_write_lock, flags);
1769  
1770 -	while (n != 0) {
1771 -		--n;
1772 -		if ((ch = *buf++) == '\n')
1773 -			prom_putchar('\r');
1774 -		prom_putchar(ch);
1775 +	dest_len = 0;
1776 +	while (n-- != 0) {
1777 +		char ch = *buf++;
1778 +		if (ch == '\n') {
1779 +			*dest++ = '\r';
1780 +			dest_len++;
1781 +		}
1782 +		*dest++ = ch;
1783 +		dest_len++;
1784 +		if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) {
1785 +			prom_console_write_buf(console_write_buf, dest_len);
1786 +			dest = console_write_buf;
1787 +			dest_len = 0;
1788 +		}
1789  	}
1790 +	if (dest_len)
1791 +		prom_console_write_buf(console_write_buf, dest_len);
1792 +
1793 +	raw_spin_unlock_irqrestore(&console_write_lock, flags);
1794  }
1795  
1796  void notrace prom_printf(const char *fmt, ...)
1797 diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
1798 index b21592f..71e7f08 100644
1799 --- a/arch/sparc/prom/tree_32.c
1800 +++ b/arch/sparc/prom/tree_32.c
1801 @@ -341,18 +341,3 @@ int prom_inst2pkg(int inst)
1802  	if (node == -1) return 0;
1803  	return node;
1804  }
1805 -
1806 -/* Return 'node' assigned to a particular prom 'path'
1807 - * FIXME: Should work for v0 as well
1808 - */
1809 -int prom_pathtoinode(char *path)
1810 -{
1811 -	int node, inst;
1812 -	
1813 -	inst = prom_devopen (path);
1814 -	if (inst == -1) return 0;
1815 -	node = prom_inst2pkg (inst);
1816 -	prom_devclose (inst);
1817 -	if (node == -1) return 0;
1818 -	return node;
1819 -}
1820 diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
1821 index 9d3f913..8327b1b 100644
1822 --- a/arch/sparc/prom/tree_64.c
1823 +++ b/arch/sparc/prom/tree_64.c
1824 @@ -374,24 +374,6 @@ inline int prom_inst2pkg(int inst)
1825  	return node;
1826  }
1827  
1828 -/* Return 'node' assigned to a particular prom 'path'
1829 - * FIXME: Should work for v0 as well
1830 - */
1831 -int
1832 -prom_pathtoinode(const char *path)
1833 -{
1834 -	int node, inst;
1835 -
1836 -	inst = prom_devopen (path);
1837 -	if (inst == 0)
1838 -		return 0;
1839 -	node = prom_inst2pkg(inst);
1840 -	prom_devclose(inst);
1841 -	if (node == -1)
1842 -		return 0;
1843 -	return node;
1844 -}
1845 -
1846  int prom_ihandle2path(int handle, char *buffer, int bufsize)
1847  {
1848  	unsigned long args[7];
1849 diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
1850 index 84c2911..aaf6282 100644
1851 --- a/arch/tile/kernel/process.c
1852 +++ b/arch/tile/kernel/process.c
1853 @@ -212,6 +212,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
1854  	childregs->sp = sp;  /* override with new user stack pointer */
1855  
1856  	/*
1857 +	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
1858 +	 * which is passed in as arg #5 to sys_clone().
1859 +	 */
1860 +	if (clone_flags & CLONE_SETTLS)
1861 +		childregs->tp = regs->regs[4];
1862 +
1863 +	/*
1864  	 * Copy the callee-saved registers from the passed pt_regs struct
1865  	 * into the context-switch callee-saved registers area.
1866  	 * We have to restore the callee-saved registers since we may
1867 diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
1868 index 7f7338c..1664cce 100644
1869 --- a/arch/um/drivers/line.c
1870 +++ b/arch/um/drivers/line.c
1871 @@ -727,6 +727,9 @@ struct winch {
1872  
1873  static void free_winch(struct winch *winch, int free_irq_ok)
1874  {
1875 +	if (free_irq_ok)
1876 +		free_irq(WINCH_IRQ, winch);
1877 +
1878  	list_del(&winch->list);
1879  
1880  	if (winch->pid != -1)
1881 @@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok)
1882  		os_close_file(winch->fd);
1883  	if (winch->stack != 0)
1884  		free_stack(winch->stack, 0);
1885 -	if (free_irq_ok)
1886 -		free_irq(WINCH_IRQ, winch);
1887  	kfree(winch);
1888  }
1889  
1890 diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
1891 index ec63785..9a873d7 100644
1892 --- a/arch/um/kernel/uml.lds.S
1893 +++ b/arch/um/kernel/uml.lds.S
1894 @@ -22,7 +22,7 @@ SECTIONS
1895    _text = .;
1896    _stext = .;
1897    __init_begin = .;
1898 -  INIT_TEXT_SECTION(PAGE_SIZE)
1899 +  INIT_TEXT_SECTION(0)
1900    . = ALIGN(PAGE_SIZE);
1901  
1902    .text      :
1903 diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
1904 index dec5678..6e3359d 100644
1905 --- a/arch/um/os-Linux/time.c
1906 +++ b/arch/um/os-Linux/time.c
1907 @@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
1908  long long disable_timer(void)
1909  {
1910  	struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
1911 -	int remain, max = UM_NSEC_PER_SEC / UM_HZ;
1912 +	long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
1913  
1914  	if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
1915  		printk(UM_KERN_ERR "disable_timer - setitimer failed, "
1916 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
1917 index 3f76523..f857bd3 100644
1918 --- a/arch/x86/include/asm/cpufeature.h
1919 +++ b/arch/x86/include/asm/cpufeature.h
1920 @@ -152,7 +152,7 @@
1921  #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
1922  #define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
1923  #define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
1924 -#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
1925 +#define X86_FEATURE_XOP		(6*32+11) /* extended AVX instructions */
1926  #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
1927  #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
1928  #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
1929 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
1930 index 30a3e97..6a45ec4 100644
1931 --- a/arch/x86/include/asm/io.h
1932 +++ b/arch/x86/include/asm/io.h
1933 @@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
1934  
1935  extern void iounmap(volatile void __iomem *addr);
1936  
1937 +extern void set_iounmap_nonlazy(void);
1938  
1939  #ifdef __KERNEL__
1940  
1941 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1942 index c52e2eb..6986312 100644
1943 --- a/arch/x86/include/asm/kvm_host.h
1944 +++ b/arch/x86/include/asm/kvm_host.h
1945 @@ -79,7 +79,7 @@
1946  #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
1947  #define KVM_MIN_FREE_MMU_PAGES 5
1948  #define KVM_REFILL_PAGES 25
1949 -#define KVM_MAX_CPUID_ENTRIES 40
1950 +#define KVM_MAX_CPUID_ENTRIES 80
1951  #define KVM_NR_FIXED_MTRR_REGION 88
1952  #define KVM_NR_VAR_MTRR 8
1953  
1954 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
1955 index 4a2d4e0..8b5393e 100644
1956 --- a/arch/x86/include/asm/mmu_context.h
1957 +++ b/arch/x86/include/asm/mmu_context.h
1958 @@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1959  	unsigned cpu = smp_processor_id();
1960  
1961  	if (likely(prev != next)) {
1962 -		/* stop flush ipis for the previous mm */
1963 -		cpumask_clear_cpu(cpu, mm_cpumask(prev));
1964  #ifdef CONFIG_SMP
1965  		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
1966  		percpu_write(cpu_tlbstate.active_mm, next);
1967 @@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1968  		/* Re-load page tables */
1969  		load_cr3(next->pgd);
1970  
1971 +		/* stop flush ipis for the previous mm */
1972 +		cpumask_clear_cpu(cpu, mm_cpumask(prev));
1973 +
1974  		/*
1975  		 * load the LDT, if the LDT is different:
1976  		 */
1977 diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
1978 index 1635074..33fc296 100644
1979 --- a/arch/x86/include/asm/mrst.h
1980 +++ b/arch/x86/include/asm/mrst.h
1981 @@ -26,7 +26,7 @@ enum mrst_cpu_type {
1982  };
1983  
1984  extern enum mrst_cpu_type __mrst_cpu_chip;
1985 -static enum mrst_cpu_type mrst_identify_cpu(void)
1986 +static inline enum mrst_cpu_type mrst_identify_cpu(void)
1987  {
1988  	return __mrst_cpu_chip;
1989  }
1990 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1991 index ebaa04a..37ea41c 100644
1992 --- a/arch/x86/include/asm/processor.h
1993 +++ b/arch/x86/include/asm/processor.h
1994 @@ -768,29 +768,6 @@ extern unsigned long		idle_halt;
1995  extern unsigned long		idle_nomwait;
1996  extern bool			c1e_detected;
1997  
1998 -/*
1999 - * on systems with caches, caches must be flashed as the absolute
2000 - * last instruction before going into a suspended halt.  Otherwise,
2001 - * dirty data can linger in the cache and become stale on resume,
2002 - * leading to strange errors.
2003 - *
2004 - * perform a variety of operations to guarantee that the compiler
2005 - * will not reorder instructions.  wbinvd itself is serializing
2006 - * so the processor will not reorder.
2007 - *
2008 - * Systems without cache can just go into halt.
2009 - */
2010 -static inline void wbinvd_halt(void)
2011 -{
2012 -	mb();
2013 -	/* check for clflush to determine if wbinvd is legal */
2014 -	if (cpu_has_clflush)
2015 -		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
2016 -	else
2017 -		while (1)
2018 -			halt();
2019 -}
2020 -
2021  extern void enable_sep_cpu(void);
2022  extern int sysenter_setup(void);
2023  
2024 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
2025 index 4cfc908..4c2f63c 100644
2026 --- a/arch/x86/include/asm/smp.h
2027 +++ b/arch/x86/include/asm/smp.h
2028 @@ -50,7 +50,7 @@ struct smp_ops {
2029  	void (*smp_prepare_cpus)(unsigned max_cpus);
2030  	void (*smp_cpus_done)(unsigned max_cpus);
2031  
2032 -	void (*smp_send_stop)(void);
2033 +	void (*stop_other_cpus)(int wait);
2034  	void (*smp_send_reschedule)(int cpu);
2035  
2036  	int (*cpu_up)(unsigned cpu);
2037 @@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
2038  
2039  static inline void smp_send_stop(void)
2040  {
2041 -	smp_ops.smp_send_stop();
2042 +	smp_ops.stop_other_cpus(0);
2043 +}
2044 +
2045 +static inline void stop_other_cpus(void)
2046 +{
2047 +	smp_ops.stop_other_cpus(1);
2048  }
2049  
2050  static inline void smp_prepare_boot_cpu(void)
2051 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
2052 index e3b534c..e0f220e 100644
2053 --- a/arch/x86/kernel/apic/apic.c
2054 +++ b/arch/x86/kernel/apic/apic.c
2055 @@ -1340,6 +1340,14 @@ void __cpuinit end_local_APIC_setup(void)
2056  
2057  	setup_apic_nmi_watchdog(NULL);
2058  	apic_pm_activate();
2059 +
2060 +	/*
2061 +	 * Now that local APIC setup is completed for BP, configure the fault
2062 +	 * handling for interrupt remapping.
2063 +	 */
2064 +	if (!smp_processor_id() && intr_remapping_enabled)
2065 +		enable_drhd_fault_handling();
2066 +
2067  }
2068  
2069  #ifdef CONFIG_X86_X2APIC
2070 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
2071 index 5c5b8f3..4d90327 100644
2072 --- a/arch/x86/kernel/apic/io_apic.c
2073 +++ b/arch/x86/kernel/apic/io_apic.c
2074 @@ -1397,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int irq,
2075  		irte.dlvry_mode = apic->irq_delivery_mode;
2076  		irte.vector = vector;
2077  		irte.dest_id = IRTE_DEST(destination);
2078 +		irte.redir_hint = 1;
2079  
2080  		/* Set source-id of interrupt request */
2081  		set_ioapic_sid(&irte, apic_id);
2082 @@ -3348,6 +3349,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
2083  		irte.dlvry_mode = apic->irq_delivery_mode;
2084  		irte.vector = cfg->vector;
2085  		irte.dest_id = IRTE_DEST(dest);
2086 +		irte.redir_hint = 1;
2087  
2088  		/* Set source-id of interrupt request */
2089  		if (pdev)
2090 @@ -3624,6 +3626,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
2091  	msg.data |= MSI_DATA_VECTOR(cfg->vector);
2092  	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2093  	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2094 +	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
2095  
2096  	dmar_msi_write(irq, &msg);
2097  
2098 diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
2099 index 83e9be4..fac49a8 100644
2100 --- a/arch/x86/kernel/apic/probe_64.c
2101 +++ b/arch/x86/kernel/apic/probe_64.c
2102 @@ -76,13 +76,6 @@ void __init default_setup_apic_routing(void)
2103  		/* need to update phys_pkg_id */
2104  		apic->phys_pkg_id = apicid_phys_pkg_id;
2105  	}
2106 -
2107 -	/*
2108 -	 * Now that apic routing model is selected, configure the
2109 -	 * fault handling for intr remapping.
2110 -	 */
2111 -	if (intr_remapping_enabled)
2112 -		enable_drhd_fault_handling();
2113  }
2114  
2115  /* Same for both flat and physical. */
2116 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
2117 index ba5f62f..81fa3cb 100644
2118 --- a/arch/x86/kernel/cpu/amd.c
2119 +++ b/arch/x86/kernel/cpu/amd.c
2120 @@ -305,8 +305,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
2121  	/* use socket ID also for last level cache */
2122  	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
2123  	/* fixup topology information on multi-node processors */
2124 -	if ((c->x86 == 0x10) && (c->x86_model == 9))
2125 -		amd_fixup_dcm(c);
2126 +	amd_fixup_dcm(c);
2127  #endif
2128  }
2129  
2130 diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
2131 index cd8da24..a2baafb 100644
2132 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
2133 +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
2134 @@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
2135  		per_cpu(acfreq_data, policy->cpu) = NULL;
2136  		acpi_processor_unregister_performance(data->acpi_data,
2137  						      policy->cpu);
2138 +		kfree(data->freq_table);
2139  		kfree(data);
2140  	}
2141  
2142 diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
2143 index c5f59d0..ac140c7 100644
2144 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c
2145 +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
2146 @@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
2147  
2148  	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
2149  		return 0;
2150 -	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
2151 +	if (boot_cpu_data.x86 < 0xf)
2152  		return 0;
2153  	/* In case some hypervisor doesn't pass SYSCFG through: */
2154  	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
2155 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
2156 index 01c0f3e..bebabec 100644
2157 --- a/arch/x86/kernel/cpu/mtrr/main.c
2158 +++ b/arch/x86/kernel/cpu/mtrr/main.c
2159 @@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
2160  }
2161  
2162  /*
2163 - * MTRR initialization for all AP's
2164 + * Delayed MTRR initialization for all AP's
2165   */
2166  void mtrr_aps_init(void)
2167  {
2168  	if (!use_intel())
2169  		return;
2170  
2171 +	/*
2172 +	 * Check if someone has requested the delay of AP MTRR initialization,
2173 +	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
2174 +	 * then we are done.
2175 +	 */
2176 +	if (!mtrr_aps_delayed_init)
2177 +		return;
2178 +
2179  	set_mtrr(~0U, 0, 0, 0);
2180  	mtrr_aps_delayed_init = false;
2181  }
2182 diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
2183 index c2897b7..46d5844 100644
2184 --- a/arch/x86/kernel/cpu/perf_event_amd.c
2185 +++ b/arch/x86/kernel/cpu/perf_event_amd.c
2186 @@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
2187   [ C(DTLB) ] = {
2188  	[ C(OP_READ) ] = {
2189  		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
2190 -		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
2191 +		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
2192  	},
2193  	[ C(OP_WRITE) ] = {
2194  		[ C(RESULT_ACCESS) ] = 0,
2195 @@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
2196   [ C(ITLB) ] = {
2197  	[ C(OP_READ) ] = {
2198  		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes        */
2199 -		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
2200 +		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
2201  	},
2202  	[ C(OP_WRITE) ] = {
2203  		[ C(RESULT_ACCESS) ] = -1,
2204 diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
2205 index 045b36c..9948288 100644
2206 --- a/arch/x86/kernel/crash_dump_64.c
2207 +++ b/arch/x86/kernel/crash_dump_64.c
2208 @@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
2209  	if (!csize)
2210  		return 0;
2211  
2212 -	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
2213 +	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
2214  	if (!vaddr)
2215  		return -ENOMEM;
2216  
2217 @@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
2218  	} else
2219  		memcpy(buf, vaddr + offset, csize);
2220  
2221 +	set_iounmap_nonlazy();
2222  	iounmap(vaddr);
2223  	return csize;
2224  }
2225 diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
2226 index ff15c9d..42c5942 100644
2227 --- a/arch/x86/kernel/hw_breakpoint.c
2228 +++ b/arch/x86/kernel/hw_breakpoint.c
2229 @@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
2230  	dr6_p = (unsigned long *)ERR_PTR(args->err);
2231  	dr6 = *dr6_p;
2232  
2233 +	/* If it's a single step, TRAP bits are random */
2234 +	if (dr6 & DR_STEP)
2235 +		return NOTIFY_DONE;
2236 +
2237  	/* Do an early return if no trap bits are set in DR6 */
2238  	if ((dr6 & DR_TRAP_BITS) == 0)
2239  		return NOTIFY_DONE;
2240 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
2241 index 3561702..2573689 100644
2242 --- a/arch/x86/kernel/microcode_intel.c
2243 +++ b/arch/x86/kernel/microcode_intel.c
2244 @@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2245  
2246  		/* For performance reasons, reuse mc area when possible */
2247  		if (!mc || mc_size > curr_mc_size) {
2248 -			if (mc)
2249 -				vfree(mc);
2250 +			vfree(mc);
2251  			mc = vmalloc(mc_size);
2252  			if (!mc)
2253  				break;
2254 @@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2255  
2256  		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
2257  		    microcode_sanity_check(mc) < 0) {
2258 -			vfree(mc);
2259  			break;
2260  		}
2261  
2262  		if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
2263 -			if (new_mc)
2264 -				vfree(new_mc);
2265 +			vfree(new_mc);
2266  			new_rev = mc_header.rev;
2267  			new_mc  = mc;
2268  			mc = NULL;	/* trigger new vmalloc */
2269 @@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2270  		leftover  -= mc_size;
2271  	}
2272  
2273 -	if (mc)
2274 -		vfree(mc);
2275 +	vfree(mc);
2276  
2277  	if (leftover) {
2278 -		if (new_mc)
2279 -			vfree(new_mc);
2280 +		vfree(new_mc);
2281  		state = UCODE_ERROR;
2282  		goto out;
2283  	}
2284 @@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
2285  		goto out;
2286  	}
2287  
2288 -	if (uci->mc)
2289 -		vfree(uci->mc);
2290 +	vfree(uci->mc);
2291  	uci->mc = (struct microcode_intel *)new_mc;
2292  
2293  	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
2294 diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
2295 index 0e0cdde..a2bd899 100644
2296 --- a/arch/x86/kernel/olpc.c
2297 +++ b/arch/x86/kernel/olpc.c
2298 @@ -114,6 +114,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
2299  	unsigned long flags;
2300  	int ret = -EIO;
2301  	int i;
2302 +	int restarts = 0;
2303  
2304  	spin_lock_irqsave(&ec_lock, flags);
2305  
2306 @@ -169,7 +170,9 @@ restart:
2307  			if (wait_on_obf(0x6c, 1)) {
2308  				printk(KERN_ERR "olpc-ec:  timeout waiting for"
2309  						" EC to provide data!\n");
2310 -				goto restart;
2311 +				if (restarts++ < 10)
2312 +					goto restart;
2313 +				goto err;
2314  			}
2315  			outbuf[i] = inb(0x68);
2316  			pr_devel("olpc-ec:  received 0x%x\n", outbuf[i]);
2317 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
2318 index e3af342..76a0d71 100644
2319 --- a/arch/x86/kernel/reboot.c
2320 +++ b/arch/x86/kernel/reboot.c
2321 @@ -641,7 +641,7 @@ void native_machine_shutdown(void)
2322  	/* O.K Now that I'm on the appropriate processor,
2323  	 * stop all of the others.
2324  	 */
2325 -	smp_send_stop();
2326 +	stop_other_cpus();
2327  #endif
2328  
2329  	lapic_shutdown();
2330 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
2331 index 74cca60..96af3a8 100644
2332 --- a/arch/x86/kernel/smp.c
2333 +++ b/arch/x86/kernel/smp.c
2334 @@ -174,10 +174,10 @@ asmlinkage void smp_reboot_interrupt(void)
2335  	irq_exit();
2336  }
2337  
2338 -static void native_smp_send_stop(void)
2339 +static void native_stop_other_cpus(int wait)
2340  {
2341  	unsigned long flags;
2342 -	unsigned long wait;
2343 +	unsigned long timeout;
2344  
2345  	if (reboot_force)
2346  		return;
2347 @@ -194,9 +194,12 @@ static void native_smp_send_stop(void)
2348  	if (num_online_cpus() > 1) {
2349  		apic->send_IPI_allbutself(REBOOT_VECTOR);
2350  
2351 -		/* Don't wait longer than a second */
2352 -		wait = USEC_PER_SEC;
2353 -		while (num_online_cpus() > 1 && wait--)
2354 +		/*
2355 +		 * Don't wait longer than a second if the caller
2356 +		 * didn't ask us to wait.
2357 +		 */
2358 +		timeout = USEC_PER_SEC;
2359 +		while (num_online_cpus() > 1 && (wait || timeout--))
2360  			udelay(1);
2361  	}
2362  
2363 @@ -254,7 +257,7 @@ struct smp_ops smp_ops = {
2364  	.smp_prepare_cpus	= native_smp_prepare_cpus,
2365  	.smp_cpus_done		= native_smp_cpus_done,
2366  
2367 -	.smp_send_stop		= native_smp_send_stop,
2368 +	.stop_other_cpus	= native_stop_other_cpus,
2369  	.smp_send_reschedule	= native_smp_send_reschedule,
2370  
2371  	.cpu_up			= native_cpu_up,
2372 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2373 index 8b3bfc4..016179e 100644
2374 --- a/arch/x86/kernel/smpboot.c
2375 +++ b/arch/x86/kernel/smpboot.c
2376 @@ -1383,11 +1383,94 @@ void play_dead_common(void)
2377  	local_irq_disable();
2378  }
2379  
2380 +#define MWAIT_SUBSTATE_MASK		0xf
2381 +#define MWAIT_SUBSTATE_SIZE		4
2382 +
2383 +#define CPUID_MWAIT_LEAF		5
2384 +#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
2385 +
2386 +/*
2387 + * We need to flush the caches before going to sleep, lest we have
2388 + * dirty data in our caches when we come back up.
2389 + */
2390 +static inline void mwait_play_dead(void)
2391 +{
2392 +	unsigned int eax, ebx, ecx, edx;
2393 +	unsigned int highest_cstate = 0;
2394 +	unsigned int highest_subcstate = 0;
2395 +	int i;
2396 +	void *mwait_ptr;
2397 +
2398 +	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
2399 +		return;
2400 +	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
2401 +		return;
2402 +	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
2403 +		return;
2404 +
2405 +	eax = CPUID_MWAIT_LEAF;
2406 +	ecx = 0;
2407 +	native_cpuid(&eax, &ebx, &ecx, &edx);
2408 +
2409 +	/*
2410 +	 * eax will be 0 if EDX enumeration is not valid.
2411 +	 * Initialized below to cstate, sub_cstate value when EDX is valid.
2412 +	 */
2413 +	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
2414 +		eax = 0;
2415 +	} else {
2416 +		edx >>= MWAIT_SUBSTATE_SIZE;
2417 +		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
2418 +			if (edx & MWAIT_SUBSTATE_MASK) {
2419 +				highest_cstate = i;
2420 +				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
2421 +			}
2422 +		}
2423 +		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
2424 +			(highest_subcstate - 1);
2425 +	}
2426 +
2427 +	/*
2428 +	 * This should be a memory location in a cache line which is
2429 +	 * unlikely to be touched by other processors.  The actual
2430 +	 * content is immaterial as it is not actually modified in any way.
2431 +	 */
2432 +	mwait_ptr = &current_thread_info()->flags;
2433 +
2434 +	wbinvd();
2435 +
2436 +	while (1) {
2437 +		/*
2438 +		 * The CLFLUSH is a workaround for erratum AAI65 for
2439 +		 * the Xeon 7400 series.  It's not clear it is actually
2440 +		 * needed, but it should be harmless in either case.
2441 +		 * The WBINVD is insufficient due to the spurious-wakeup
2442 +		 * case where we return around the loop.
2443 +		 */
2444 +		clflush(mwait_ptr);
2445 +		__monitor(mwait_ptr, 0, 0);
2446 +		mb();
2447 +		__mwait(eax, 0);
2448 +	}
2449 +}
2450 +
2451 +static inline void hlt_play_dead(void)
2452 +{
2453 +	if (current_cpu_data.x86 >= 4)
2454 +		wbinvd();
2455 +
2456 +	while (1) {
2457 +		native_halt();
2458 +	}
2459 +}
2460 +
2461  void native_play_dead(void)
2462  {
2463  	play_dead_common();
2464  	tboot_shutdown(TB_SHUTDOWN_WFS);
2465 -	wbinvd_halt();
2466 +
2467 +	mwait_play_dead();	/* Only returns on failure */
2468 +	hlt_play_dead();
2469  }
2470  
2471  #else /* ... !CONFIG_HOTPLUG_CPU */
2472 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
2473 index d78c5ed..e00d1af 100644
2474 --- a/arch/x86/kernel/syscall_table_32.S
2475 +++ b/arch/x86/kernel/syscall_table_32.S
2476 @@ -345,12 +345,14 @@ ENTRY(sys_call_table)
2477  	.long sys_complete_job
2478  	.long sys_od_open
2479  	.long sys_od_close
2480 -	.long sys_fmlp_down
2481 -	.long sys_fmlp_up
2482 -	.long sys_srp_down
2483 -	.long sys_srp_up
2484 +	.long sys_litmus_lock
2485 +	.long sys_litmus_unlock
2486  	.long sys_query_job_no
2487  	.long sys_wait_for_job_release
2488  	.long sys_wait_for_ts_release
2489  	.long sys_release_ts
2490  	.long sys_null_call
2491 +	.long sys_set_rt_task_mc_param
2492 +	.long sys_change_speed
2493 +	.long sys_register_pid
2494 +	.long sys_get_job_report
2495 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
2496 index 60788de..9f4edeb 100644
2497 --- a/arch/x86/kernel/traps.c
2498 +++ b/arch/x86/kernel/traps.c
2499 @@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
2500  	if (regs->flags & X86_VM_MASK) {
2501  		handle_vm86_trap((struct kernel_vm86_regs *) regs,
2502  				error_code, 1);
2503 +		preempt_conditional_cli(regs);
2504  		return;
2505  	}
2506  
2507 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
2508 index 5ffb5622..61fb985 100644
2509 --- a/arch/x86/kernel/vm86_32.c
2510 +++ b/arch/x86/kernel/vm86_32.c
2511 @@ -551,8 +551,14 @@ cannot_handle:
2512  int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
2513  {
2514  	if (VMPI.is_vm86pus) {
2515 -		if ((trapno == 3) || (trapno == 1))
2516 -			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
2517 +		if ((trapno == 3) || (trapno == 1)) {
2518 +			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
2519 +			/* setting this flag forces the code in entry_32.S to
2520 +			   call save_v86_state() and change the stack pointer
2521 +			   to KVM86->regs32 */
2522 +			set_thread_flag(TIF_IRET);
2523 +			return 0;
2524 +		}
2525  		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
2526  		return 0;
2527  	}
2528 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
2529 index 9c253bd..5471285 100644
2530 --- a/arch/x86/kernel/xsave.c
2531 +++ b/arch/x86/kernel/xsave.c
2532 @@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
2533  	 * Setup init_xstate_buf to represent the init state of
2534  	 * all the features managed by the xsave
2535  	 */
2536 -	init_xstate_buf = alloc_bootmem(xstate_size);
2537 +	init_xstate_buf = alloc_bootmem_align(xstate_size,
2538 +					      __alignof__(struct xsave_struct));
2539  	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
2540  
2541  	clts();
2542 diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
2543 index 4b7b73c..9f163e6 100644
2544 --- a/arch/x86/kvm/i8259.c
2545 +++ b/arch/x86/kvm/i8259.c
2546 @@ -570,6 +570,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
2547  	s->pics[1].elcr_mask = 0xde;
2548  	s->pics[0].pics_state = s;
2549  	s->pics[1].pics_state = s;
2550 +	s->pics[0].isr_ack = 0xff;
2551 +	s->pics[1].isr_ack = 0xff;
2552  
2553  	/*
2554  	 * Initialize PIO device
2555 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2556 index 311f6da..7fed5b7 100644
2557 --- a/arch/x86/kvm/mmu.c
2558 +++ b/arch/x86/kvm/mmu.c
2559 @@ -2254,6 +2254,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2560  		return 0;
2561  	}
2562  	direct = !is_paging(vcpu);
2563 +
2564 +	if (mmu_check_root(vcpu, root_gfn))
2565 +		return 1;
2566 +
2567  	for (i = 0; i < 4; ++i) {
2568  		hpa_t root = vcpu->arch.mmu.pae_root[i];
2569  
2570 @@ -2265,13 +2269,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2571  				continue;
2572  			}
2573  			root_gfn = pdptr >> PAGE_SHIFT;
2574 +			if (mmu_check_root(vcpu, root_gfn))
2575 +				return 1;
2576  		} else if (vcpu->arch.mmu.root_level == 0)
2577  			root_gfn = 0;
2578 -		if (mmu_check_root(vcpu, root_gfn))
2579 -			return 1;
2580  		if (tdp_enabled) {
2581  			direct = 1;
2582 -			root_gfn = i << 30;
2583 +			root_gfn = i << (30 - PAGE_SHIFT);
2584  		}
2585  		spin_lock(&vcpu->kvm->mmu_lock);
2586  		kvm_mmu_free_some_pages(vcpu);
2587 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
2588 index 8a3f9f6..e7c3f3b 100644
2589 --- a/arch/x86/kvm/svm.c
2590 +++ b/arch/x86/kvm/svm.c
2591 @@ -88,6 +88,14 @@ struct nested_state {
2592  	/* A VMEXIT is required but not yet emulated */
2593  	bool exit_required;
2594  
2595 +	/*
2596 +	 * If we vmexit during an instruction emulation we need this to restore
2597 +	 * the l1 guest rip after the emulation
2598 +	 */
2599 +	unsigned long vmexit_rip;
2600 +	unsigned long vmexit_rsp;
2601 +	unsigned long vmexit_rax;
2602 +
2603  	/* cache for intercepts of the guest */
2604  	u16 intercept_cr_read;
2605  	u16 intercept_cr_write;
2606 @@ -1206,8 +1214,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2607  		if (old == new) {
2608  			/* cr0 write with ts and mp unchanged */
2609  			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2610 -			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
2611 +			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
2612 +				svm->nested.vmexit_rip = kvm_rip_read(vcpu);
2613 +				svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
2614 +				svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
2615  				return;
2616 +			}
2617  		}
2618  	}
2619  
2620 @@ -2399,6 +2411,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
2621  	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
2622  }
2623  
2624 +static int cr0_write_interception(struct vcpu_svm *svm)
2625 +{
2626 +	struct kvm_vcpu *vcpu = &svm->vcpu;
2627 +	int r;
2628 +
2629 +	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
2630 +
2631 +	if (svm->nested.vmexit_rip) {
2632 +		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
2633 +		kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
2634 +		kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
2635 +		svm->nested.vmexit_rip = 0;
2636 +	}
2637 +
2638 +	return r == EMULATE_DONE;
2639 +}
2640 +
2641  static int cr8_write_interception(struct vcpu_svm *svm)
2642  {
2643  	struct kvm_run *kvm_run = svm->vcpu.run;
2644 @@ -2672,7 +2701,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
2645  	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
2646  	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
2647  	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
2648 -	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
2649 +	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
2650  	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
2651  	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
2652  	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
2653 @@ -3252,6 +3281,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2654  	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
2655  
2656  	load_host_msrs(vcpu);
2657 +	kvm_load_ldt(ldt_selector);
2658  	loadsegment(fs, fs_selector);
2659  #ifdef CONFIG_X86_64
2660  	load_gs_index(gs_selector);
2661 @@ -3259,7 +3289,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2662  #else
2663  	loadsegment(gs, gs_selector);
2664  #endif
2665 -	kvm_load_ldt(ldt_selector);
2666  
2667  	reload_tss(vcpu);
2668  
2669 @@ -3354,6 +3383,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
2670  static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
2671  {
2672  	switch (func) {
2673 +	case 0x00000001:
2674 +		/* Mask out xsave bit as long as it is not supported by SVM */
2675 +		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
2676 +		break;
2677 +	case 0x80000001:
2678 +		if (nested)
2679 +			entry->ecx |= (1 << 2); /* Set SVM bit */
2680 +		break;
2681  	case 0x8000000A:
2682  		entry->eax = 1; /* SVM revision 1 */
2683  		entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
2684 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2685 index 7bddfab..b3986fe 100644
2686 --- a/arch/x86/kvm/vmx.c
2687 +++ b/arch/x86/kvm/vmx.c
2688 @@ -828,10 +828,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2689  #endif
2690  
2691  #ifdef CONFIG_X86_64
2692 -	if (is_long_mode(&vmx->vcpu)) {
2693 -		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2694 +	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2695 +	if (is_long_mode(&vmx->vcpu))
2696  		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2697 -	}
2698  #endif
2699  	for (i = 0; i < vmx->save_nmsrs; ++i)
2700  		kvm_set_shared_msr(vmx->guest_msrs[i].index,
2701 @@ -846,23 +845,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2702  
2703  	++vmx->vcpu.stat.host_state_reload;
2704  	vmx->host_state.loaded = 0;
2705 -	if (vmx->host_state.fs_reload_needed)
2706 -		loadsegment(fs, vmx->host_state.fs_sel);
2707 +#ifdef CONFIG_X86_64
2708 +	if (is_long_mode(&vmx->vcpu))
2709 +		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2710 +#endif
2711  	if (vmx->host_state.gs_ldt_reload_needed) {
2712  		kvm_load_ldt(vmx->host_state.ldt_sel);
2713  #ifdef CONFIG_X86_64
2714  		load_gs_index(vmx->host_state.gs_sel);
2715 -		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
2716  #else
2717  		loadsegment(gs, vmx->host_state.gs_sel);
2718  #endif
2719  	}
2720 +	if (vmx->host_state.fs_reload_needed)
2721 +		loadsegment(fs, vmx->host_state.fs_sel);
2722  	reload_tss();
2723  #ifdef CONFIG_X86_64
2724 -	if (is_long_mode(&vmx->vcpu)) {
2725 -		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2726 -		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2727 -	}
2728 +	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2729  #endif
2730  	if (current_thread_info()->status & TS_USEDFPU)
2731  		clts();
2732 @@ -4249,11 +4248,6 @@ static int vmx_get_lpage_level(void)
2733  		return PT_PDPE_LEVEL;
2734  }
2735  
2736 -static inline u32 bit(int bitno)
2737 -{
2738 -	return 1 << (bitno & 31);
2739 -}
2740 -
2741  static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
2742  {
2743  	struct kvm_cpuid_entry2 *best;
2744 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2745 index 3a09c62..a5746de 100644
2746 --- a/arch/x86/kvm/x86.c
2747 +++ b/arch/x86/kvm/x86.c
2748 @@ -153,11 +153,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
2749  
2750  u64 __read_mostly host_xcr0;
2751  
2752 -static inline u32 bit(int bitno)
2753 -{
2754 -	return 1 << (bitno & 31);
2755 -}
2756 -
2757  static void kvm_on_user_return(struct user_return_notifier *urn)
2758  {
2759  	unsigned slot;
2760 @@ -1994,9 +1989,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2761  		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
2762  	/* cpuid 0x80000001.ecx */
2763  	const u32 kvm_supported_word6_x86_features =
2764 -		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
2765 +		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
2766  		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2767 -		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
2768 +		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
2769  		0 /* SKINIT */ | 0 /* WDT */;
2770  
2771  	/* all calls to cpuid_count() should be made on the same cpu */
2772 @@ -2305,6 +2300,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2773  		!kvm_exception_is_soft(vcpu->arch.exception.nr);
2774  	events->exception.nr = vcpu->arch.exception.nr;
2775  	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2776 +	events->exception.pad = 0;
2777  	events->exception.error_code = vcpu->arch.exception.error_code;
2778  
2779  	events->interrupt.injected =
2780 @@ -2318,12 +2314,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2781  	events->nmi.injected = vcpu->arch.nmi_injected;
2782  	events->nmi.pending = vcpu->arch.nmi_pending;
2783  	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2784 +	events->nmi.pad = 0;
2785  
2786  	events->sipi_vector = vcpu->arch.sipi_vector;
2787  
2788  	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2789  			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2790  			 | KVM_VCPUEVENT_VALID_SHADOW);
2791 +	memset(&events->reserved, 0, sizeof(events->reserved));
2792  }
2793  
2794  static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2795 @@ -2366,6 +2364,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2796  	dbgregs->dr6 = vcpu->arch.dr6;
2797  	dbgregs->dr7 = vcpu->arch.dr7;
2798  	dbgregs->flags = 0;
2799 +	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2800  }
2801  
2802  static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2803 @@ -2849,6 +2848,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2804  		sizeof(ps->channels));
2805  	ps->flags = kvm->arch.vpit->pit_state.flags;
2806  	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2807 +	memset(&ps->reserved, 0, sizeof(ps->reserved));
2808  	return r;
2809  }
2810  
2811 @@ -2912,10 +2912,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2812  		struct kvm_memslots *slots, *old_slots;
2813  		unsigned long *dirty_bitmap;
2814  
2815 -		spin_lock(&kvm->mmu_lock);
2816 -		kvm_mmu_slot_remove_write_access(kvm, log->slot);
2817 -		spin_unlock(&kvm->mmu_lock);
2818 -
2819  		r = -ENOMEM;
2820  		dirty_bitmap = vmalloc(n);
2821  		if (!dirty_bitmap)
2822 @@ -2937,6 +2933,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2823  		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2824  		kfree(old_slots);
2825  
2826 +		spin_lock(&kvm->mmu_lock);
2827 +		kvm_mmu_slot_remove_write_access(kvm, log->slot);
2828 +		spin_unlock(&kvm->mmu_lock);
2829 +
2830  		r = -EFAULT;
2831  		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
2832  			vfree(dirty_bitmap);
2833 @@ -3229,6 +3229,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
2834  		now_ns = timespec_to_ns(&now);
2835  		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
2836  		user_ns.flags = 0;
2837 +		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
2838  
2839  		r = -EFAULT;
2840  		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
2841 @@ -5111,6 +5112,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2842  
2843  	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
2844  	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2845 +	if (sregs->cr4 & X86_CR4_OSXSAVE)
2846 +		update_cpuid(vcpu);
2847  	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
2848  		load_pdptrs(vcpu, vcpu->arch.cr3);
2849  		mmu_reset_needed = 1;
2850 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
2851 index b7a4047..0bf3274 100644
2852 --- a/arch/x86/kvm/x86.h
2853 +++ b/arch/x86/kvm/x86.h
2854 @@ -65,6 +65,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
2855  	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
2856  }
2857  
2858 +static inline u32 bit(int bitno)
2859 +{
2860 +	return 1 << (bitno & 31);
2861 +}
2862 +
2863  void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
2864  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
2865  
2866 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
2867 index b67a6b5..4262331 100644
2868 --- a/arch/x86/oprofile/op_model_amd.c
2869 +++ b/arch/x86/oprofile/op_model_amd.c
2870 @@ -484,21 +484,29 @@ static int __init_ibs_nmi(void)
2871  	return 0;
2872  }
2873  
2874 -/* initialize the APIC for the IBS interrupts if available */
2875 +/*
2876 + * check and reserve APIC extended interrupt LVT offset for IBS if
2877 + * available
2878 + *
2879 + * init_ibs() performs implicitly cpu-local operations, so pin this
2880 + * thread to its current CPU
2881 + */
2882 +
2883  static void init_ibs(void)
2884  {
2885 -	ibs_caps = get_ibs_caps();
2886 +	preempt_disable();
2887  
2888 +	ibs_caps = get_ibs_caps();
2889  	if (!ibs_caps)
2890 -		return;
2891 +		goto out;
2892  
2893 -	if (__init_ibs_nmi()) {
2894 +	if (__init_ibs_nmi() < 0)
2895  		ibs_caps = 0;
2896 -		return;
2897 -	}
2898 +	else
2899 +		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
2900  
2901 -	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
2902 -	       (unsigned)ibs_caps);
2903 +out:
2904 +	preempt_enable();
2905  }
2906  
2907  static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
2908 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
2909 index 4a2afa1..b6552b1 100644
2910 --- a/arch/x86/vdso/Makefile
2911 +++ b/arch/x86/vdso/Makefile
2912 @@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
2913  
2914  export CPPFLAGS_vdso.lds += -P -C
2915  
2916 -VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
2917 +VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
2918  		      	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
2919  
2920  $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
2921 @@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y)		+= sysenter
2922  vdso32-images			= $(vdso32.so-y:%=vdso32-%.so)
2923  
2924  CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
2925 -VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
2926 +VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
2927  
2928  # This makes sure the $(obj) subdirectory exists even though vdso32/
2929  # is not a kbuild sub-make subdirectory.
2930 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
2931 index 7d46c84..0f6cd14 100644
2932 --- a/arch/x86/xen/enlighten.c
2933 +++ b/arch/x86/xen/enlighten.c
2934 @@ -1017,10 +1017,6 @@ static void xen_reboot(int reason)
2935  {
2936  	struct sched_shutdown r = { .reason = reason };
2937  
2938 -#ifdef CONFIG_SMP
2939 -	smp_send_stop();
2940 -#endif
2941 -
2942  	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
2943  		BUG();
2944  }
2945 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
2946 index 25f232b..f4d0100 100644
2947 --- a/arch/x86/xen/smp.c
2948 +++ b/arch/x86/xen/smp.c
2949 @@ -400,9 +400,9 @@ static void stop_self(void *v)
2950  	BUG();
2951  }
2952  
2953 -static void xen_smp_send_stop(void)
2954 +static void xen_stop_other_cpus(int wait)
2955  {
2956 -	smp_call_function(stop_self, NULL, 0);
2957 +	smp_call_function(stop_self, NULL, wait);
2958  }
2959  
2960  static void xen_smp_send_reschedule(int cpu)
2961 @@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
2962  	.cpu_disable = xen_cpu_disable,
2963  	.play_dead = xen_play_dead,
2964  
2965 -	.smp_send_stop = xen_smp_send_stop,
2966 +	.stop_other_cpus = xen_stop_other_cpus,
2967  	.smp_send_reschedule = xen_smp_send_reschedule,
2968  
2969  	.send_call_func_ipi = xen_smp_send_call_function_ipi,
2970 diff --git a/block/blk-map.c b/block/blk-map.c
2971 index ade0a08..267a57b 100644
2972 --- a/block/blk-map.c
2973 +++ b/block/blk-map.c
2974 @@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
2975  			unaligned = 1;
2976  			break;
2977  		}
2978 +		if (!iov[i].iov_len)
2979 +			return -EINVAL;
2980  	}
2981  
2982  	if (unaligned || (q->dma_pad_mask & len) || map_data)
2983 diff --git a/block/blk-merge.c b/block/blk-merge.c
2984 index eafc94f..c24bf43 100644
2985 --- a/block/blk-merge.c
2986 +++ b/block/blk-merge.c
2987 @@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
2988  		return 0;
2989  
2990  	fbio = bio;
2991 -	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
2992 +	cluster = blk_queue_cluster(q);
2993  	seg_size = 0;
2994  	nr_phys_segs = 0;
2995  	for_each_bio(bio) {
2996 @@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
2997  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
2998  				   struct bio *nxt)
2999  {
3000 -	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
3001 +	if (!blk_queue_cluster(q))
3002  		return 0;
3003  
3004  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
3005 @@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
3006  	int nsegs, cluster;
3007  
3008  	nsegs = 0;
3009 -	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
3010 +	cluster = blk_queue_cluster(q);
3011  
3012  	/*
3013  	 * for each bio in rq
3014 diff --git a/block/blk-settings.c b/block/blk-settings.c
3015 index a234f4b..ea9430d 100644
3016 --- a/block/blk-settings.c
3017 +++ b/block/blk-settings.c
3018 @@ -125,7 +125,7 @@ void blk_set_default_limits(struct queue_limits *lim)
3019  	lim->alignment_offset = 0;
3020  	lim->io_opt = 0;
3021  	lim->misaligned = 0;
3022 -	lim->no_cluster = 0;
3023 +	lim->cluster = 1;
3024  }
3025  EXPORT_SYMBOL(blk_set_default_limits);
3026  
3027 @@ -343,7 +343,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_size);
3028   *   hardware can operate on without reverting to read-modify-write
3029   *   operations.
3030   */
3031 -void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
3032 +void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
3033  {
3034  	q->limits.physical_block_size = size;
3035  
3036 @@ -468,15 +468,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
3037  void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
3038  {
3039  	blk_stack_limits(&t->limits, &b->limits, 0);
3040 -
3041 -	if (!t->queue_lock)
3042 -		WARN_ON_ONCE(1);
3043 -	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
3044 -		unsigned long flags;
3045 -		spin_lock_irqsave(t->queue_lock, flags);
3046 -		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
3047 -		spin_unlock_irqrestore(t->queue_lock, flags);
3048 -	}
3049  }
3050  EXPORT_SYMBOL(blk_queue_stack_limits);
3051  
3052 @@ -547,7 +538,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
3053  	t->io_min = max(t->io_min, b->io_min);
3054  	t->io_opt = lcm(t->io_opt, b->io_opt);
3055  
3056 -	t->no_cluster |= b->no_cluster;
3057 +	t->cluster &= b->cluster;
3058  	t->discard_zeroes_data &= b->discard_zeroes_data;
3059  
3060  	/* Physical block size a multiple of the logical block size? */
3061 @@ -643,7 +634,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
3062  		       sector_t offset)
3063  {
3064  	struct request_queue *t = disk->queue;
3065 -	struct request_queue *b = bdev_get_queue(bdev);
3066  
3067  	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
3068  		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
3069 @@ -654,17 +644,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
3070  		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
3071  		       top, bottom);
3072  	}
3073 -
3074 -	if (!t->queue_lock)
3075 -		WARN_ON_ONCE(1);
3076 -	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
3077 -		unsigned long flags;
3078 -
3079 -		spin_lock_irqsave(t->queue_lock, flags);
3080 -		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
3081 -			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
3082 -		spin_unlock_irqrestore(t->queue_lock, flags);
3083 -	}
3084  }
3085  EXPORT_SYMBOL(disk_stack_limits);
3086  
3087 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
3088 index 0749b89..a26c930 100644
3089 --- a/block/blk-sysfs.c
3090 +++ b/block/blk-sysfs.c
3091 @@ -114,7 +114,7 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
3092  
3093  static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
3094  {
3095 -	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
3096 +	if (blk_queue_cluster(q))
3097  		return queue_var_show(queue_max_segment_size(q), (page));
3098  
3099  	return queue_var_show(PAGE_CACHE_SIZE, (page));
3100 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
3101 index 9eba291..909479c 100644
3102 --- a/block/cfq-iosched.c
3103 +++ b/block/cfq-iosched.c
3104 @@ -3402,6 +3402,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3105  {
3106  	struct cfq_io_context *cic = cfqd->active_cic;
3107  
3108 +	/* If the queue already has requests, don't wait */
3109 +	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3110 +		return false;
3111 +
3112  	/* If there are other queues in the group, don't wait */
3113  	if (cfqq->cfqg->nr_cfqq > 1)
3114  		return false;
3115 diff --git a/block/genhd.c b/block/genhd.c
3116 index 59a2db6..315afd2 100644
3117 --- a/block/genhd.c
3118 +++ b/block/genhd.c
3119 @@ -541,13 +541,15 @@ void add_disk(struct gendisk *disk)
3120  	disk->major = MAJOR(devt);
3121  	disk->first_minor = MINOR(devt);
3122  
3123 +	/* Register BDI before referencing it from bdev */
3124 +	bdi = &disk->queue->backing_dev_info;
3125 +	bdi_register_dev(bdi, disk_devt(disk));
3126 +
3127  	blk_register_region(disk_devt(disk), disk->minors, NULL,
3128  			    exact_match, exact_lock, disk);
3129  	register_disk(disk);
3130  	blk_register_queue(disk);
3131  
3132 -	bdi = &disk->queue->backing_dev_info;
3133 -	bdi_register_dev(bdi, disk_devt(disk));
3134  	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
3135  				   "bdi");
3136  	WARN_ON(retval);
3137 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
3138 index a8b5a10..4f4230b 100644
3139 --- a/block/scsi_ioctl.c
3140 +++ b/block/scsi_ioctl.c
3141 @@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
3142  	if (hdr->iovec_count) {
3143  		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
3144  		size_t iov_data_len;
3145 -		struct sg_iovec *iov;
3146 +		struct sg_iovec *sg_iov;
3147 +		struct iovec *iov;
3148 +		int i;
3149  
3150 -		iov = kmalloc(size, GFP_KERNEL);
3151 -		if (!iov) {
3152 +		sg_iov = kmalloc(size, GFP_KERNEL);
3153 +		if (!sg_iov) {
3154  			ret = -ENOMEM;
3155  			goto out;
3156  		}
3157  
3158 -		if (copy_from_user(iov, hdr->dxferp, size)) {
3159 -			kfree(iov);
3160 +		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
3161 +			kfree(sg_iov);
3162  			ret = -EFAULT;
3163  			goto out;
3164  		}
3165  
3166 +		/*
3167 +		 * Sum up the vecs, making sure they don't overflow
3168 +		 */
3169 +		iov = (struct iovec *) sg_iov;
3170 +		iov_data_len = 0;
3171 +		for (i = 0; i < hdr->iovec_count; i++) {
3172 +			if (iov_data_len + iov[i].iov_len < iov_data_len) {
3173 +				kfree(sg_iov);
3174 +				ret = -EINVAL;
3175 +				goto out;
3176 +			}
3177 +			iov_data_len += iov[i].iov_len;
3178 +		}
3179 +
3180  		/* SG_IO howto says that the shorter of the two wins */
3181 -		iov_data_len = iov_length((struct iovec *)iov,
3182 -					  hdr->iovec_count);
3183  		if (hdr->dxfer_len < iov_data_len) {
3184 -			hdr->iovec_count = iov_shorten((struct iovec *)iov,
3185 +			hdr->iovec_count = iov_shorten(iov,
3186  						       hdr->iovec_count,
3187  						       hdr->dxfer_len);
3188  			iov_data_len = hdr->dxfer_len;
3189  		}
3190  
3191 -		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
3192 +		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
3193  					  iov_data_len, GFP_KERNEL);
3194 -		kfree(iov);
3195 +		kfree(sg_iov);
3196  	} else if (hdr->dxfer_len)
3197  		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
3198  				      GFP_KERNEL);
3199 diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
3200 index d555b37..6b0b5d0 100644
3201 --- a/drivers/acpi/acpica/dswexec.c
3202 +++ b/drivers/acpi/acpica/dswexec.c
3203 @@ -300,10 +300,25 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
3204  			 * we must enter this object into the namespace.  The created
3205  			 * object is temporary and will be deleted upon completion of
3206  			 * the execution of this method.
3207 +			 *
3208 +			 * Note 10/2010: Except for the Scope() op. This opcode does
3209 +			 * not actually create a new object, it refers to an existing
3210 +			 * object. However, for Scope(), we want to indeed open a
3211 +			 * new scope.
3212  			 */
3213 -			status = acpi_ds_load2_begin_op(walk_state, NULL);
3214 +			if (op->common.aml_opcode != AML_SCOPE_OP) {
3215 +				status =
3216 +				    acpi_ds_load2_begin_op(walk_state, NULL);
3217 +			} else {
3218 +				status =
3219 +				    acpi_ds_scope_stack_push(op->named.node,
3220 +							     op->named.node->
3221 +							     type, walk_state);
3222 +				if (ACPI_FAILURE(status)) {
3223 +					return_ACPI_STATUS(status);
3224 +				}
3225 +			}
3226  		}
3227 -
3228  		break;
3229  
3230  	case AML_CLASS_EXECUTE:
3231 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
3232 index 9841720..4c0a0a3 100644
3233 --- a/drivers/acpi/battery.c
3234 +++ b/drivers/acpi/battery.c
3235 @@ -98,6 +98,7 @@ enum {
3236  	 * due to bad math.
3237  	 */
3238  	ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
3239 +	ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
3240  };
3241  
3242  struct acpi_battery {
3243 @@ -412,6 +413,8 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
3244  		result = extract_package(battery, buffer.pointer,
3245  				info_offsets, ARRAY_SIZE(info_offsets));
3246  	kfree(buffer.pointer);
3247 +	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
3248 +		battery->full_charge_capacity = battery->design_capacity;
3249  	return result;
3250  }
3251  
3252 @@ -448,6 +451,10 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
3253  	    battery->rate_now != -1)
3254  		battery->rate_now = abs((s16)battery->rate_now);
3255  
3256 +	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
3257 +	    && battery->capacity_now >= 0 && battery->capacity_now <= 100)
3258 +		battery->capacity_now = (battery->capacity_now *
3259 +				battery->full_charge_capacity) / 100;
3260  	return result;
3261  }
3262  
3263 @@ -561,6 +568,33 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
3264  	}
3265  }
3266  
3267 +/*
3268 + * According to the ACPI spec, some kinds of primary batteries can
3269 + * report percentage battery remaining capacity directly to OS.
3270 + * In this case, it reports the Last Full Charged Capacity == 100
3271 + * and BatteryPresentRate == 0xFFFFFFFF.
3272 + *
3273 + * Now we found some battery reports percentage remaining capacity
3274 + * even if it's rechargeable.
3275 + * https://bugzilla.kernel.org/show_bug.cgi?id=15979
3276 + *
3277 + * Handle this correctly so that they won't break userspace.
3278 + */
3279 +static void acpi_battery_quirks2(struct acpi_battery *battery)
3280 +{
3281 +	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
3282 +		return ;
3283 +
3284 +        if (battery->full_charge_capacity == 100 &&
3285 +            battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
3286 +            battery->capacity_now >=0 && battery->capacity_now <= 100) {
3287 +		set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
3288 +		battery->full_charge_capacity = battery->design_capacity;
3289 +		battery->capacity_now = (battery->capacity_now *
3290 +				battery->full_charge_capacity) / 100;
3291 +	}
3292 +}
3293 +
3294  static int acpi_battery_update(struct acpi_battery *battery)
3295  {
3296  	int result, old_present = acpi_battery_present(battery);
3297 @@ -586,7 +620,9 @@ static int acpi_battery_update(struct acpi_battery *battery)
3298  	if (!battery->bat.dev)
3299  		sysfs_add_battery(battery);
3300  #endif
3301 -	return acpi_battery_get_state(battery);
3302 +	result = acpi_battery_get_state(battery);
3303 +	acpi_battery_quirks2(battery);
3304 +	return result;
3305  }
3306  
3307  /* --------------------------------------------------------------------------
3308 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
3309 index 310e3b9..d68bd61 100644
3310 --- a/drivers/acpi/bus.c
3311 +++ b/drivers/acpi/bus.c
3312 @@ -935,6 +935,12 @@ static int __init acpi_bus_init(void)
3313  		goto error1;
3314  	}
3315  
3316 +	/*
3317 +	 * _PDC control method may load dynamic SSDT tables,
3318 +	 * and we need to install the table handler before that.
3319 +	 */
3320 +	acpi_sysfs_init();
3321 +
3322  	acpi_early_processor_set_pdc();
3323  
3324  	/*
3325 @@ -1026,7 +1032,6 @@ static int __init acpi_init(void)
3326  	acpi_scan_init();
3327  	acpi_ec_init();
3328  	acpi_power_init();
3329 -	acpi_sysfs_init();
3330  	acpi_debugfs_init();
3331  	acpi_sleep_proc_init();
3332  	acpi_wakeup_device_init();
3333 diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
3334 index 7de27d4..74c4a39 100644
3335 --- a/drivers/acpi/debugfs.c
3336 +++ b/drivers/acpi/debugfs.c
3337 @@ -79,7 +79,7 @@ int __init acpi_debugfs_init(void)
3338  	if (!acpi_dir)
3339  		goto err;
3340  
3341 -	cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
3342 +	cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
3343  					acpi_dir, NULL, &cm_fops);
3344  	if (!cm_dentry)
3345  		goto err;
3346 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
3347 index f31291b..7bff18b 100644
3348 --- a/drivers/acpi/ec.c
3349 +++ b/drivers/acpi/ec.c
3350 @@ -929,6 +929,9 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
3351  	ec_flag_msi, "MSI hardware", {
3352  	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
3353  	{
3354 +	ec_flag_msi, "MSI hardware", {
3355 +	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
3356 +	{
3357  	ec_validate_ecdt, "ASUS hardware", {
3358  	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
3359  	{},
3360 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
3361 index e5fdeeb..d1a0f5b 100644
3362 --- a/drivers/ata/ahci.h
3363 +++ b/drivers/ata/ahci.h
3364 @@ -72,6 +72,7 @@ enum {
3365  	AHCI_CMD_RESET		= (1 << 8),
3366  	AHCI_CMD_CLR_BUSY	= (1 << 10),
3367  
3368 +	RX_FIS_PIO_SETUP	= 0x20,	/* offset of PIO Setup FIS data */
3369  	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
3370  	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
3371  	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
3372 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
3373 index 8eea309..137514d 100644
3374 --- a/drivers/ata/libahci.c
3375 +++ b/drivers/ata/libahci.c
3376 @@ -1830,12 +1830,24 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
3377  static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
3378  {
3379  	struct ahci_port_priv *pp = qc->ap->private_data;
3380 -	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
3381 +	u8 *rx_fis = pp->rx_fis;
3382  
3383  	if (pp->fbs_enabled)
3384 -		d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
3385 +		rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
3386 +
3387 +	/*
3388 +	 * After a successful execution of an ATA PIO data-in command,
3389 +	 * the device doesn't send D2H Reg FIS to update the TF and
3390 +	 * the host should take TF and E_Status from the preceding PIO
3391 +	 * Setup FIS.
3392 +	 */
3393 +	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
3394 +	    !(qc->flags & ATA_QCFLAG_FAILED)) {
3395 +		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
3396 +		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
3397 +	} else
3398 +		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
3399  
3400 -	ata_tf_from_fis(d2h_fis, &qc->result_tf);
3401  	return true;
3402  }
3403  
3404 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
3405 index a89172c..7bb6787 100644
3406 --- a/drivers/ata/libata-scsi.c
3407 +++ b/drivers/ata/libata-scsi.c
3408 @@ -2577,8 +2577,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
3409  		 *
3410  		 * If door lock fails, always clear sdev->locked to
3411  		 * avoid this infinite loop.
3412 +		 *
3413 +		 * This may happen before SCSI scan is complete.  Make
3414 +		 * sure qc->dev->sdev isn't NULL before dereferencing.
3415  		 */
3416 -		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
3417 +		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
3418  			qc->dev->sdev->locked = 0;
3419  
3420  		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
3421 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
3422 index e30c537..c55988b 100644
3423 --- a/drivers/ata/libata-sff.c
3424 +++ b/drivers/ata/libata-sff.c
3425 @@ -1532,11 +1532,10 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap,
3426  		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3427  			return ata_sff_idle_irq(ap);
3428  		break;
3429 -	case HSM_ST:
3430 -	case HSM_ST_LAST:
3431 -		break;
3432 -	default:
3433 +	case HSM_ST_IDLE:
3434  		return ata_sff_idle_irq(ap);
3435 +	default:
3436 +		break;
3437  	}
3438  
3439  	/* check main status, clearing INTRQ if needed */
3440 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
3441 index 8cc536e..d7d8026 100644
3442 --- a/drivers/ata/pata_mpc52xx.c
3443 +++ b/drivers/ata/pata_mpc52xx.c
3444 @@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
3445  };
3446  
3447  static struct ata_port_operations mpc52xx_ata_port_ops = {
3448 -	.inherits		= &ata_sff_port_ops,
3449 +	.inherits		= &ata_bmdma_port_ops,
3450  	.sff_dev_select		= mpc52xx_ata_dev_select,
3451  	.set_piomode		= mpc52xx_ata_set_piomode,
3452  	.set_dmamode		= mpc52xx_ata_set_dmamode,
3453 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
3454 index 4730c42..c51b8d2 100644
3455 --- a/drivers/ata/sata_via.c
3456 +++ b/drivers/ata/sata_via.c
3457 @@ -538,7 +538,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
3458  	return 0;
3459  }
3460  
3461 -static void svia_configure(struct pci_dev *pdev)
3462 +static void svia_configure(struct pci_dev *pdev, int board_id)
3463  {
3464  	u8 tmp8;
3465  
3466 @@ -577,7 +577,7 @@ static void svia_configure(struct pci_dev *pdev)
3467  	}
3468  
3469  	/*
3470 -	 * vt6421 has problems talking to some drives.  The following
3471 +	 * vt6420/1 has problems talking to some drives.  The following
3472  	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
3473  	 *
3474  	 * When host issues HOLD, device may send up to 20DW of data
3475 @@ -596,8 +596,9 @@ static void svia_configure(struct pci_dev *pdev)
3476  	 *
3477  	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
3478  	 * http://article.gmane.org/gmane.linux.ide/46352
3479 +	 * http://thread.gmane.org/gmane.linux.kernel/1062139
3480  	 */
3481 -	if (pdev->device == 0x3249) {
3482 +	if (board_id == vt6420 || board_id == vt6421) {
3483  		pci_read_config_byte(pdev, 0x52, &tmp8);
3484  		tmp8 |= 1 << 2;
3485  		pci_write_config_byte(pdev, 0x52, tmp8);
3486 @@ -652,7 +653,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3487  	if (rc)
3488  		return rc;
3489  
3490 -	svia_configure(pdev);
3491 +	svia_configure(pdev, board_id);
3492  
3493  	pci_set_master(pdev);
3494  	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
3495 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
3496 index ab735a6..d773397 100644
3497 --- a/drivers/block/xen-blkfront.c
3498 +++ b/drivers/block/xen-blkfront.c
3499 @@ -71,7 +71,7 @@ struct blk_shadow {
3500  
3501  static const struct block_device_operations xlvbd_block_fops;
3502  
3503 -#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
3504 +#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
3505  
3506  /*
3507   * We have one of these per vbd, whether ide, scsi or 'other'.  They
3508 diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
3509 index 998833d..17361ba 100644
3510 --- a/drivers/bluetooth/hci_ldisc.c
3511 +++ b/drivers/bluetooth/hci_ldisc.c
3512 @@ -256,9 +256,16 @@ static int hci_uart_tty_open(struct tty_struct *tty)
3513  
3514  	BT_DBG("tty %p", tty);
3515  
3516 +	/* FIXME: This btw is bogus, nothing requires the old ldisc to clear
3517 +	   the pointer */
3518  	if (hu)
3519  		return -EEXIST;
3520  
3521 +	/* Error if the tty has no write op instead of leaving an exploitable
3522 +	   hole */
3523 +	if (tty->ops->write == NULL)
3524 +		return -EOPNOTSUPP;
3525 +
3526  	if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
3527  		BT_ERR("Can't allocate control structure");
3528  		return -ENFILE;
3529 diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
3530 index cd18493..aa5c782 100644
3531 --- a/drivers/char/agp/intel-agp.c
3532 +++ b/drivers/char/agp/intel-agp.c
3533 @@ -927,20 +927,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
3534  	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
3535  
3536  	/*
3537 -	* If the device has not been properly setup, the following will catch
3538 -	* the problem and should stop the system from crashing.
3539 -	* 20030610 - hamish@zot.org
3540 -	*/
3541 -	if (pci_enable_device(pdev)) {
3542 -		dev_err(&pdev->dev, "can't enable PCI device\n");
3543 -		agp_put_bridge(bridge);
3544 -		return -ENODEV;
3545 -	}
3546 -
3547 -	/*
3548  	* The following fixes the case where the BIOS has "forgotten" to
3549  	* provide an address range for the GART.
3550  	* 20030610 - hamish@zot.org
3551 +	* This happens before pci_enable_device() intentionally;
3552 +	* calling pci_enable_device() before assigning the resource
3553 +	* will result in the GART being disabled on machines with such
3554 +	* BIOSs (the GART ends up with a BAR starting at 0, which
3555 +	* conflicts a lot of other devices).
3556  	*/
3557  	r = &pdev->resource[0];
3558  	if (!r->start && r->end) {
3559 @@ -951,6 +945,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
3560  		}
3561  	}
3562  
3563 +	/*
3564 +	* If the device has not been properly setup, the following will catch
3565 +	* the problem and should stop the system from crashing.
3566 +	* 20030610 - hamish@zot.org
3567 +	*/
3568 +	if (pci_enable_device(pdev)) {
3569 +		dev_err(&pdev->dev, "can't enable PCI device\n");
3570 +		agp_put_bridge(bridge);
3571 +		return -ENODEV;
3572 +	}
3573 +
3574  	/* Fill in the mode register */
3575  	if (cap_ptr) {
3576  		pci_read_config_dword(pdev,
3577 @@ -1049,6 +1054,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
3578  	ID(PCI_DEVICE_ID_INTEL_G45_HB),
3579  	ID(PCI_DEVICE_ID_INTEL_G41_HB),
3580  	ID(PCI_DEVICE_ID_INTEL_B43_HB),
3581 +	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
3582  	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
3583  	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
3584  	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
3585 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
3586 index 75e0a34..6ea3bf6 100644
3587 --- a/drivers/char/agp/intel-gtt.c
3588 +++ b/drivers/char/agp/intel-gtt.c
3589 @@ -534,7 +534,7 @@ static void intel_i830_init_gtt_entries(void)
3590  
3591  	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
3592  
3593 -	if (IS_I965) {
3594 +	if (IS_G33 || IS_I965) {
3595  		u32 pgetbl_ctl;
3596  		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
3597  
3598 @@ -567,22 +567,6 @@ static void intel_i830_init_gtt_entries(void)
3599  			size = 512;
3600  		}
3601  		size += 4; /* add in BIOS popup space */
3602 -	} else if (IS_G33 && !IS_PINEVIEW) {
3603 -	/* G33's GTT size defined in gmch_ctrl */
3604 -		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
3605 -		case G33_PGETBL_SIZE_1M:
3606 -			size = 1024;
3607 -			break;
3608 -		case G33_PGETBL_SIZE_2M:
3609 -			size = 2048;
3610 -			break;
3611 -		default:
3612 -			dev_info(&agp_bridge->dev->dev,
3613 -				 "unknown page table size 0x%x, assuming 512KB\n",
3614 -				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
3615 -			size = 512;
3616 -		}
3617 -		size += 4;
3618  	} else if (IS_G4X || IS_PINEVIEW) {
3619  		/* On 4 series hardware, GTT stolen is separate from graphics
3620  		 * stolen, ignore it in stolen gtt entries counting.  However,
3621 @@ -1257,24 +1241,31 @@ static int intel_i915_get_gtt_size(void)
3622  	int size;
3623  
3624  	if (IS_G33) {
3625 -		u16 gmch_ctrl;
3626 +		u32 pgetbl_ctl;
3627 +		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
3628  
3629 -		/* G33's GTT size defined in gmch_ctrl */
3630 -		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
3631 -		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
3632 -		case I830_GMCH_GMS_STOLEN_512:
3633 +		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
3634 +		case I965_PGETBL_SIZE_128KB:
3635 +			size = 128;
3636 +			break;
3637 +		case I965_PGETBL_SIZE_256KB:
3638 +			size = 256;
3639 +			break;
3640 +		case I965_PGETBL_SIZE_512KB:
3641  			size = 512;
3642  			break;
3643 -		case I830_GMCH_GMS_STOLEN_1024:
3644 +		case I965_PGETBL_SIZE_1MB:
3645  			size = 1024;
3646  			break;
3647 -		case I830_GMCH_GMS_STOLEN_8192:
3648 -			size = 8*1024;
3649 +		case I965_PGETBL_SIZE_2MB:
3650 +			size = 2048;
3651 +			break;
3652 +		case I965_PGETBL_SIZE_1_5MB:
3653 +			size = 1024 + 512;
3654  			break;
3655  		default:
3656 -			dev_info(&agp_bridge->dev->dev,
3657 -				 "unknown page table size 0x%x, assuming 512KB\n",
3658 -				(gmch_ctrl & I830_GMCH_GMS_MASK));
3659 +			dev_info(&intel_private.pcidev->dev,
3660 +				 "unknown page table size, assuming 512KB\n");
3661  			size = 512;
3662  		}
3663  	} else {
3664 @@ -1306,14 +1297,6 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
3665  	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
3666  	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
3667  
3668 -	gtt_map_size = intel_i915_get_gtt_size();
3669 -
3670 -	intel_private.gtt = ioremap(temp2, gtt_map_size);
3671 -	if (!intel_private.gtt)
3672 -		return -ENOMEM;
3673 -
3674 -	intel_private.gtt_total_size = gtt_map_size / 4;
3675 -
3676  	temp &= 0xfff80000;
3677  
3678  	intel_private.registers = ioremap(temp, 128 * 4096);
3679 @@ -1322,6 +1305,14 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
3680  		return -ENOMEM;
3681  	}
3682  
3683 +	gtt_map_size = intel_i915_get_gtt_size();
3684 +
3685 +	intel_private.gtt = ioremap(temp2, gtt_map_size);
3686 +	if (!intel_private.gtt)
3687 +		return -ENOMEM;
3688 +
3689 +	intel_private.gtt_total_size = gtt_map_size / 4;
3690 +
3691  	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
3692  	global_cache_flush();	/* FIXME: ? */
3693  
3694 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
3695 index a0a1829..f8e7d89 100644
3696 --- a/drivers/char/hpet.c
3697 +++ b/drivers/char/hpet.c
3698 @@ -479,6 +479,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
3699  	if (irq) {
3700  		unsigned long irq_flags;
3701  
3702 +		if (devp->hd_flags & HPET_SHARED_IRQ) {
3703 +			/*
3704 +			 * To prevent the interrupt handler from seeing an
3705 +			 * unwanted interrupt status bit, program the timer
3706 +			 * so that it will not fire in the near future ...
3707 +			 */
3708 +			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
3709 +			       &timer->hpet_config);
3710 +			write_counter(read_counter(&hpet->hpet_mc),
3711 +				      &timer->hpet_compare);
3712 +			/* ... and clear any left-over status. */
3713 +			isr = 1 << (devp - devp->hd_hpets->hp_dev);
3714 +			writel(isr, &hpet->hpet_isr);
3715 +		}
3716 +
3717  		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
3718  		irq_flags = devp->hd_flags & HPET_SHARED_IRQ
3719  						? IRQF_SHARED : IRQF_DISABLED;
3720 @@ -970,6 +985,8 @@ static int hpet_acpi_add(struct acpi_device *device)
3721  		return -ENODEV;
3722  
3723  	if (!data.hd_address || !data.hd_nirqs) {
3724 +		if (data.hd_address)
3725 +			iounmap(data.hd_address);
3726  		printk("%s: no address or irqs in _CRS\n", __func__);
3727  		return -ENODEV;
3728  	}
3729 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
3730 index 7bd7c45..22abd18 100644
3731 --- a/drivers/char/ipmi/ipmi_si_intf.c
3732 +++ b/drivers/char/ipmi/ipmi_si_intf.c
3733 @@ -320,6 +320,7 @@ static int unload_when_empty = 1;
3734  static int add_smi(struct smi_info *smi);
3735  static int try_smi_init(struct smi_info *smi);
3736  static void cleanup_one_si(struct smi_info *to_clean);
3737 +static void cleanup_ipmi_si(void);
3738  
3739  static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
3740  static int register_xaction_notifier(struct notifier_block *nb)
3741 @@ -1665,6 +1666,17 @@ static int check_hotmod_int_op(const char *curr, const char *option,
3742  	return 0;
3743  }
3744  
3745 +static struct smi_info *smi_info_alloc(void)
3746 +{
3747 +	struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
3748 +
3749 +	if (info) {
3750 +		spin_lock_init(&info->si_lock);
3751 +		spin_lock_init(&info->msg_lock);
3752 +	}
3753 +	return info;
3754 +}
3755 +
3756  static int hotmod_handler(const char *val, struct kernel_param *kp)
3757  {
3758  	char *str = kstrdup(val, GFP_KERNEL);
3759 @@ -1779,7 +1791,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
3760  		}
3761  
3762  		if (op == HM_ADD) {
3763 -			info = kzalloc(sizeof(*info), GFP_KERNEL);
3764 +			info = smi_info_alloc();
3765  			if (!info) {
3766  				rv = -ENOMEM;
3767  				goto out;
3768 @@ -1844,7 +1856,7 @@ static __devinit void hardcode_find_bmc(void)
3769  		if (!ports[i] && !addrs[i])
3770  			continue;
3771  
3772 -		info = kzalloc(sizeof(*info), GFP_KERNEL);
3773 +		info = smi_info_alloc();
3774  		if (!info)
3775  			return;
3776  
3777 @@ -2028,7 +2040,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
3778  		return -ENODEV;
3779  	}
3780  
3781 -	info = kzalloc(sizeof(*info), GFP_KERNEL);
3782 +	info = smi_info_alloc();
3783  	if (!info) {
3784  		printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
3785  		return -ENOMEM;
3786 @@ -2138,7 +2150,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
3787  	if (!acpi_dev)
3788  		return -ENODEV;
3789  
3790 -	info = kzalloc(sizeof(*info), GFP_KERNEL);
3791 +	info = smi_info_alloc();
3792  	if (!info)
3793  		return -ENOMEM;
3794  
3795 @@ -2319,7 +2331,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
3796  {
3797  	struct smi_info *info;
3798  
3799 -	info = kzalloc(sizeof(*info), GFP_KERNEL);
3800 +	info = smi_info_alloc();
3801  	if (!info) {
3802  		printk(KERN_ERR PFX "Could not allocate SI data\n");
3803  		return;
3804 @@ -2426,7 +2438,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
3805  	int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
3806  	struct smi_info *info;
3807  
3808 -	info = kzalloc(sizeof(*info), GFP_KERNEL);
3809 +	info = smi_info_alloc();
3810  	if (!info)
3811  		return -ENOMEM;
3812  
3813 @@ -2567,7 +2579,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
3814  		return -EINVAL;
3815  	}
3816  
3817 -	info = kzalloc(sizeof(*info), GFP_KERNEL);
3818 +	info = smi_info_alloc();
3819  
3820  	if (!info) {
3821  		dev_err(&dev->dev,
3822 @@ -3014,7 +3026,7 @@ static __devinit void default_find_bmc(void)
3823  		if (check_legacy_ioport(ipmi_defaults[i].port))
3824  			continue;
3825  #endif
3826 -		info = kzalloc(sizeof(*info), GFP_KERNEL);
3827 +		info = smi_info_alloc();
3828  		if (!info)
3829  			return;
3830  
3831 @@ -3139,9 +3151,6 @@ static int try_smi_init(struct smi_info *new_smi)
3832  		goto out_err;
3833  	}
3834  
3835 -	spin_lock_init(&(new_smi->si_lock));
3836 -	spin_lock_init(&(new_smi->msg_lock));
3837 -
3838  	/* Do low-level detection first. */
3839  	if (new_smi->handlers->detect(new_smi->si_sm)) {
3840  		if (new_smi->addr_source)
3841 @@ -3428,16 +3437,7 @@ static __devinit int init_ipmi_si(void)
3842  	mutex_lock(&smi_infos_lock);
3843  	if (unload_when_empty && list_empty(&smi_infos)) {
3844  		mutex_unlock(&smi_infos_lock);
3845 -#ifdef CONFIG_PCI
3846 -		if (pci_registered)
3847 -			pci_unregister_driver(&ipmi_pci_driver);
3848 -#endif
3849 -
3850 -#ifdef CONFIG_PPC_OF
3851 -		if (of_registered)
3852 -			of_unregister_platform_driver(&ipmi_of_platform_driver);
3853 -#endif
3854 -		driver_unregister(&ipmi_driver.driver);
3855 +		cleanup_ipmi_si();
3856  		printk(KERN_WARNING PFX
3857  		       "Unable to find any System Interface(s)\n");
3858  		return -ENODEV;
3859 diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
3860 index 04ef3ef..0e62674 100644
3861 --- a/drivers/char/n_gsm.c
3862 +++ b/drivers/char/n_gsm.c
3863 @@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
3864  		if (msg->len < 128)
3865  			*--dp = (msg->len << 1) | EA;
3866  		else {
3867 -			*--dp = (msg->len >> 6) | EA;
3868 -			*--dp = (msg->len & 127) << 1;
3869 +			*--dp = (msg->len >> 7);	/* bits 7 - 15 */
3870 +			*--dp = (msg->len & 127) << 1;	/* bits 0 - 6 */
3871  		}
3872  	}
3873  
3874 @@ -968,6 +968,8 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
3875  {
3876  	struct gsm_msg *msg;
3877  	msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
3878 +	if (msg == NULL)
3879 +		return;
3880  	msg->data[0] = (cmd & 0xFE) << 1 | EA;	/* Clear C/R */
3881  	msg->data[1] = (dlen << 1) | EA;
3882  	memcpy(msg->data + 2, data, dlen);
3883 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
3884 index 9ecd6be..45f9fad 100644
3885 --- a/drivers/char/pcmcia/synclink_cs.c
3886 +++ b/drivers/char/pcmcia/synclink_cs.c
3887 @@ -4127,6 +4127,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3888  	if (cmd != SIOCWANDEV)
3889  		return hdlc_ioctl(dev, ifr, cmd);
3890  
3891 +	memset(&new_line, 0, size);
3892 +
3893  	switch(ifr->ifr_settings.type) {
3894  	case IF_GET_IFACE: /* return current sync_serial_settings */
3895  
3896 diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
3897 index 74f00b5..9445f48 100644
3898 --- a/drivers/char/ramoops.c
3899 +++ b/drivers/char/ramoops.c
3900 @@ -27,7 +27,6 @@
3901  #include <linux/ioport.h>
3902  
3903  #define RAMOOPS_KERNMSG_HDR "===="
3904 -#define RAMOOPS_HEADER_SIZE   (5 + sizeof(struct timeval))
3905  
3906  #define RECORD_SIZE 4096
3907  
3908 @@ -63,8 +62,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
3909  			struct ramoops_context, dump);
3910  	unsigned long s1_start, s2_start;
3911  	unsigned long l1_cpy, l2_cpy;
3912 -	int res;
3913 -	char *buf;
3914 +	int res, hdr_size;
3915 +	char *buf, *buf_orig;
3916  	struct timeval timestamp;
3917  
3918  	/* Only dump oopses if dump_oops is set */
3919 @@ -72,6 +71,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
3920  		return;
3921  
3922  	buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
3923 +	buf_orig = buf;
3924 +
3925  	memset(buf, '\0', RECORD_SIZE);
3926  	res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
3927  	buf += res;
3928 @@ -79,8 +80,9 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
3929  	res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
3930  	buf += res;
3931  
3932 -	l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
3933 -	l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
3934 +	hdr_size = buf - buf_orig;
3935 +	l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
3936 +	l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
3937  
3938  	s2_start = l2 - l2_cpy;
3939  	s1_start = l1 - l1_cpy;
3940 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
3941 index 05ad4a1..2ec5f33 100644
3942 --- a/drivers/char/tpm/tpm.c
3943 +++ b/drivers/char/tpm/tpm.c
3944 @@ -354,12 +354,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
3945  		    tpm_protected_ordinal_duration[ordinal &
3946  						   TPM_PROTECTED_ORDINAL_MASK];
3947  
3948 -	if (duration_idx != TPM_UNDEFINED)
3949 +	if (duration_idx != TPM_UNDEFINED) {
3950  		duration = chip->vendor.duration[duration_idx];
3951 -	if (duration <= 0)
3952 +		/* if duration is 0, it's because chip->vendor.duration wasn't */
3953 +		/* filled yet, so we set the lowest timeout just to give enough */
3954 +		/* time for tpm_get_timeouts() to succeed */
3955 +		return (duration <= 0 ? HZ : duration);
3956 +	} else
3957  		return 2 * 60 * HZ;
3958 -	else
3959 -		return duration;
3960  }
3961  EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
3962  
3963 @@ -565,9 +567,11 @@ duration:
3964  	if (rc)
3965  		return;
3966  
3967 -	if (be32_to_cpu(tpm_cmd.header.out.return_code)
3968 -	    != 3 * sizeof(u32))
3969 +	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
3970 +	    be32_to_cpu(tpm_cmd.header.out.length)
3971 +	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
3972  		return;
3973 +
3974  	duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
3975  	chip->vendor.duration[TPM_SHORT] =
3976  	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
3977 @@ -911,6 +915,18 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
3978  }
3979  EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
3980  
3981 +ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
3982 +			  char *buf)
3983 +{
3984 +	struct tpm_chip *chip = dev_get_drvdata(dev);
3985 +
3986 +	return sprintf(buf, "%d %d %d\n",
3987 +	               jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
3988 +	               jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
3989 +	               jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
3990 +}
3991 +EXPORT_SYMBOL_GPL(tpm_show_timeouts);
3992 +
3993  ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
3994  			const char *buf, size_t count)
3995  {
3996 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
3997 index 792868d..ba1779c 100644
3998 --- a/drivers/char/tpm/tpm.h
3999 +++ b/drivers/char/tpm/tpm.h
4000 @@ -56,6 +56,8 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
4001  				char *);
4002  extern ssize_t tpm_show_temp_deactivated(struct device *,
4003  					 struct device_attribute *attr, char *);
4004 +extern ssize_t tpm_show_timeouts(struct device *,
4005 +				 struct device_attribute *attr, char *);
4006  
4007  struct tpm_chip;
4008  
4009 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
4010 index 1030f84..3e1f2bb 100644
4011 --- a/drivers/char/tpm/tpm_tis.c
4012 +++ b/drivers/char/tpm/tpm_tis.c
4013 @@ -25,6 +25,7 @@
4014  #include <linux/slab.h>
4015  #include <linux/interrupt.h>
4016  #include <linux/wait.h>
4017 +#include <linux/acpi.h>
4018  #include "tpm.h"
4019  
4020  #define TPM_HEADER_SIZE 10
4021 @@ -78,6 +79,26 @@ enum tis_defaults {
4022  static LIST_HEAD(tis_chips);
4023  static DEFINE_SPINLOCK(tis_lock);
4024  
4025 +#ifdef CONFIG_ACPI
4026 +static int is_itpm(struct pnp_dev *dev)
4027 +{
4028 +	struct acpi_device *acpi = pnp_acpi_device(dev);
4029 +	struct acpi_hardware_id *id;
4030 +
4031 +	list_for_each_entry(id, &acpi->pnp.ids, list) {
4032 +		if (!strcmp("INTC0102", id->id))
4033 +			return 1;
4034 +	}
4035 +
4036 +	return 0;
4037 +}
4038 +#else
4039 +static int is_itpm(struct pnp_dev *dev)
4040 +{
4041 +	return 0;
4042 +}
4043 +#endif
4044 +
4045  static int check_locality(struct tpm_chip *chip, int l)
4046  {
4047  	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
4048 @@ -355,6 +376,7 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
4049  		   NULL);
4050  static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
4051  static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
4052 +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
4053  
4054  static struct attribute *tis_attrs[] = {
4055  	&dev_attr_pubek.attr,
4056 @@ -364,7 +386,8 @@ static struct attribute *tis_attrs[] = {
4057  	&dev_attr_owned.attr,
4058  	&dev_attr_temp_deactivated.attr,
4059  	&dev_attr_caps.attr,
4060 -	&dev_attr_cancel.attr, NULL,
4061 +	&dev_attr_cancel.attr,
4062 +	&dev_attr_timeouts.attr, NULL,
4063  };
4064  
4065  static struct attribute_group tis_attr_grp = {
4066 @@ -472,6 +495,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
4067  		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
4068  		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
4069  
4070 +	if (is_itpm(to_pnp_dev(dev)))
4071 +		itpm = 1;
4072 +
4073  	if (itpm)
4074  		dev_info(dev, "Intel iTPM workaround enabled\n");
4075  
4076 diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
4077 index cc1e985..d8210ca 100644
4078 --- a/drivers/char/tty_buffer.c
4079 +++ b/drivers/char/tty_buffer.c
4080 @@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
4081  	spin_lock_irqsave(&tty->buf.lock, flags);
4082  
4083  	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
4084 -		struct tty_buffer *head;
4085 +		struct tty_buffer *head, *tail = tty->buf.tail;
4086 +		int seen_tail = 0;
4087  		while ((head = tty->buf.head) != NULL) {
4088  			int count;
4089  			char *char_buf;
4090 @@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work)
4091  			if (!count) {
4092  				if (head->next == NULL)
4093  					break;
4094 +				/*
4095 +				  There's a possibility tty might get new buffer
4096 +				  added during the unlock window below. We could
4097 +				  end up spinning in here forever hogging the CPU
4098 +				  completely. To avoid this let's have a rest each
4099 +				  time we processed the tail buffer.
4100 +				*/
4101 +				if (tail == head)
4102 +					seen_tail = 1;
4103  				tty->buf.head = head->next;
4104  				tty_buffer_free(tty, head);
4105  				continue;
4106 @@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work)
4107  			   line discipline as we want to empty the queue */
4108  			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
4109  				break;
4110 -			if (!tty->receive_room) {
4111 +			if (!tty->receive_room || seen_tail) {
4112  				schedule_delayed_work(&tty->buf.work, 1);
4113  				break;
4114  			}
4115 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
4116 index 613c852..e0f7f4b 100644
4117 --- a/drivers/char/tty_io.c
4118 +++ b/drivers/char/tty_io.c
4119 @@ -553,6 +553,9 @@ void __tty_hangup(struct tty_struct *tty)
4120  
4121  	tty_lock();
4122  
4123 +	/* some functions below drop BTM, so we need this bit */
4124 +	set_bit(TTY_HUPPING, &tty->flags);
4125 +
4126  	/* inuse_filps is protected by the single tty lock,
4127  	   this really needs to change if we want to flush the
4128  	   workqueue with the lock held */
4129 @@ -572,6 +575,10 @@ void __tty_hangup(struct tty_struct *tty)
4130  	}
4131  	spin_unlock(&tty_files_lock);
4132  
4133 +	/*
4134 +	 * it drops BTM and thus races with reopen
4135 +	 * we protect the race by TTY_HUPPING
4136 +	 */
4137  	tty_ldisc_hangup(tty);
4138  
4139  	read_lock(&tasklist_lock);
4140 @@ -609,7 +616,6 @@ void __tty_hangup(struct tty_struct *tty)
4141  	tty->session = NULL;
4142  	tty->pgrp = NULL;
4143  	tty->ctrl_status = 0;
4144 -	set_bit(TTY_HUPPED, &tty->flags);
4145  	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
4146  
4147  	/* Account for the p->signal references we killed */
4148 @@ -635,6 +641,7 @@ void __tty_hangup(struct tty_struct *tty)
4149  	 * can't yet guarantee all that.
4150  	 */
4151  	set_bit(TTY_HUPPED, &tty->flags);
4152 +	clear_bit(TTY_HUPPING, &tty->flags);
4153  	tty_ldisc_enable(tty);
4154  
4155  	tty_unlock();
4156 @@ -1304,7 +1311,9 @@ static int tty_reopen(struct tty_struct *tty)
4157  {
4158  	struct tty_driver *driver = tty->driver;
4159  
4160 -	if (test_bit(TTY_CLOSING, &tty->flags))
4161 +	if (test_bit(TTY_CLOSING, &tty->flags) ||
4162 +			test_bit(TTY_HUPPING, &tty->flags) ||
4163 +			test_bit(TTY_LDISC_CHANGING, &tty->flags))
4164  		return -EIO;
4165  
4166  	if (driver->type == TTY_DRIVER_TYPE_PTY &&
4167 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
4168 index 412f977..4214d58 100644
4169 --- a/drivers/char/tty_ldisc.c
4170 +++ b/drivers/char/tty_ldisc.c
4171 @@ -47,6 +47,7 @@
4172  
4173  static DEFINE_SPINLOCK(tty_ldisc_lock);
4174  static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
4175 +static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
4176  /* Line disc dispatch table */
4177  static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
4178  
4179 @@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
4180  		return;
4181  	}
4182  	local_irq_restore(flags);
4183 +	wake_up(&tty_ldisc_idle);
4184  }
4185  
4186  /**
4187 @@ -452,6 +454,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
4188                  /* BTM here locks versus a hangup event */
4189  		WARN_ON(!tty_locked());
4190  		ret = ld->ops->open(tty);
4191 +		if (ret)
4192 +			clear_bit(TTY_LDISC_OPEN, &tty->flags);
4193  		return ret;
4194  	}
4195  	return 0;
4196 @@ -531,6 +535,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
4197  }
4198  
4199  /**
4200 + *	tty_ldisc_wait_idle	-	wait for the ldisc to become idle
4201 + *	@tty: tty to wait for
4202 + *
4203 + *	Wait for the line discipline to become idle. The discipline must
4204 + *	have been halted for this to guarantee it remains idle.
4205 + */
4206 +static int tty_ldisc_wait_idle(struct tty_struct *tty)
4207 +{
4208 +	int ret;
4209 +	ret = wait_event_interruptible_timeout(tty_ldisc_idle,
4210 +			atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
4211 +	if (ret < 0)
4212 +		return ret;
4213 +	return ret > 0 ? 0 : -EBUSY;
4214 +}
4215 +
4216 +/**
4217   *	tty_set_ldisc		-	set line discipline
4218   *	@tty: the terminal to set
4219   *	@ldisc: the line discipline
4220 @@ -634,8 +655,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
4221  
4222  	flush_scheduled_work();
4223  
4224 +	retval = tty_ldisc_wait_idle(tty);
4225 +
4226  	tty_lock();
4227  	mutex_lock(&tty->ldisc_mutex);
4228 +
4229 +	/* handle wait idle failure locked */
4230 +	if (retval) {
4231 +		tty_ldisc_put(new_ldisc);
4232 +		goto enable;
4233 +	}
4234 +
4235  	if (test_bit(TTY_HUPPED, &tty->flags)) {
4236  		/* We were raced by the hangup method. It will have stomped
4237  		   the ldisc data and closed the ldisc down */
4238 @@ -669,6 +699,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
4239  
4240  	tty_ldisc_put(o_ldisc);
4241  
4242 +enable:
4243  	/*
4244  	 *	Allow ldisc referencing to occur again
4245  	 */
4246 @@ -714,9 +745,12 @@ static void tty_reset_termios(struct tty_struct *tty)
4247   *	state closed
4248   */
4249  
4250 -static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
4251 +static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
4252  {
4253 -	struct tty_ldisc *ld;
4254 +	struct tty_ldisc *ld = tty_ldisc_get(ldisc);
4255 +
4256 +	if (IS_ERR(ld))
4257 +		return -1;
4258  
4259  	tty_ldisc_close(tty, tty->ldisc);
4260  	tty_ldisc_put(tty->ldisc);
4261 @@ -724,10 +758,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
4262  	/*
4263  	 *	Switch the line discipline back
4264  	 */
4265 -	ld = tty_ldisc_get(ldisc);
4266 -	BUG_ON(IS_ERR(ld));
4267  	tty_ldisc_assign(tty, ld);
4268  	tty_set_termios_ldisc(tty, ldisc);
4269 +
4270 +	return 0;
4271  }
4272  
4273  /**
4274 @@ -802,13 +836,16 @@ void tty_ldisc_hangup(struct tty_struct *tty)
4275  	   a FIXME */
4276  	if (tty->ldisc) {	/* Not yet closed */
4277  		if (reset == 0) {
4278 -			tty_ldisc_reinit(tty, tty->termios->c_line);
4279 -			err = tty_ldisc_open(tty, tty->ldisc);
4280 +
4281 +			if (!tty_ldisc_reinit(tty, tty->termios->c_line))
4282 +				err = tty_ldisc_open(tty, tty->ldisc);
4283 +			else
4284 +				err = 1;
4285  		}
4286  		/* If the re-open fails or we reset then go to N_TTY. The
4287  		   N_TTY open cannot fail */
4288  		if (reset || err) {
4289 -			tty_ldisc_reinit(tty, N_TTY);
4290 +			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
4291  			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
4292  		}
4293  		tty_ldisc_enable(tty);
4294 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
4295 index 0f69c5e..7dc855b 100644
4296 --- a/drivers/char/virtio_console.c
4297 +++ b/drivers/char/virtio_console.c
4298 @@ -1314,6 +1314,17 @@ static void control_work_handler(struct work_struct *work)
4299  	spin_unlock(&portdev->cvq_lock);
4300  }
4301  
4302 +static void out_intr(struct virtqueue *vq)
4303 +{
4304 +	struct port *port;
4305 +
4306 +	port = find_port_by_vq(vq->vdev->priv, vq);
4307 +	if (!port)
4308 +		return;
4309 +
4310 +	wake_up_interruptible(&port->waitqueue);
4311 +}
4312 +
4313  static void in_intr(struct virtqueue *vq)
4314  {
4315  	struct port *port;
4316 @@ -1430,7 +1441,7 @@ static int init_vqs(struct ports_device *portdev)
4317  	 */
4318  	j = 0;
4319  	io_callbacks[j] = in_intr;
4320 -	io_callbacks[j + 1] = NULL;
4321 +	io_callbacks[j + 1] = out_intr;
4322  	io_names[j] = "input";
4323  	io_names[j + 1] = "output";
4324  	j += 2;
4325 @@ -1444,7 +1455,7 @@ static int init_vqs(struct ports_device *portdev)
4326  		for (i = 1; i < nr_ports; i++) {
4327  			j += 2;
4328  			io_callbacks[j] = in_intr;
4329 -			io_callbacks[j + 1] = NULL;
4330 +			io_callbacks[j + 1] = out_intr;
4331  			io_names[j] = "input";
4332  			io_names[j + 1] = "output";
4333  		}
4334 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
4335 index 38df8c1..6b68a0f 100644
4336 --- a/drivers/char/vt_ioctl.c
4337 +++ b/drivers/char/vt_ioctl.c
4338 @@ -503,6 +503,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
4339  	struct kbd_struct * kbd;
4340  	unsigned int console;
4341  	unsigned char ucval;
4342 +	unsigned int uival;
4343  	void __user *up = (void __user *)arg;
4344  	int i, perm;
4345  	int ret = 0;
4346 @@ -657,7 +658,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
4347  		break;
4348  
4349  	case KDGETMODE:
4350 -		ucval = vc->vc_mode;
4351 +		uival = vc->vc_mode;
4352  		goto setint;
4353  
4354  	case KDMAPDISP:
4355 @@ -695,7 +696,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
4356  		break;
4357  
4358  	case KDGKBMODE:
4359 -		ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
4360 +		uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
4361  				 (kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
4362  				 (kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
4363  				 K_XLATE);
4364 @@ -717,9 +718,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
4365  		break;
4366  
4367  	case KDGKBMETA:
4368 -		ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
4369 +		uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
4370  	setint:
4371 -		ret = put_user(ucval, (int __user *)arg);
4372 +		ret = put_user(uival, (int __user *)arg);
4373  		break;
4374  
4375  	case KDGETKEYCODE:
4376 @@ -949,7 +950,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
4377  		for (i = 0; i < MAX_NR_CONSOLES; ++i)
4378  			if (! VT_IS_IN_USE(i))
4379  				break;
4380 -		ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
4381 +		uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
4382  		goto setint;		 
4383  
4384  	/*
4385 diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
4386 index a507108..97df791 100644
4387 --- a/drivers/cpuidle/cpuidle.c
4388 +++ b/drivers/cpuidle/cpuidle.c
4389 @@ -154,6 +154,45 @@ void cpuidle_resume_and_unlock(void)
4390  
4391  EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
4392  
4393 +#ifdef CONFIG_ARCH_HAS_CPU_RELAX
4394 +static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
4395 +{
4396 +	ktime_t	t1, t2;
4397 +	s64 diff;
4398 +	int ret;
4399 +
4400 +	t1 = ktime_get();
4401 +	local_irq_enable();
4402 +	while (!need_resched())
4403 +		cpu_relax();
4404 +
4405 +	t2 = ktime_get();
4406 +	diff = ktime_to_us(ktime_sub(t2, t1));
4407 +	if (diff > INT_MAX)
4408 +		diff = INT_MAX;
4409 +
4410 +	ret = (int) diff;
4411 +	return ret;
4412 +}
4413 +
4414 +static void poll_idle_init(struct cpuidle_device *dev)
4415 +{
4416 +	struct cpuidle_state *state = &dev->states[0];
4417 +
4418 +	cpuidle_set_statedata(state, NULL);
4419 +
4420 +	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
4421 +	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
4422 +	state->exit_latency = 0;
4423 +	state->target_residency = 0;
4424 +	state->power_usage = -1;
4425 +	state->flags = CPUIDLE_FLAG_POLL;
4426 +	state->enter = poll_idle;
4427 +}
4428 +#else
4429 +static void poll_idle_init(struct cpuidle_device *dev) {}
4430 +#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
4431 +
4432  /**
4433   * cpuidle_enable_device - enables idle PM for a CPU
4434   * @dev: the CPU
4435 @@ -178,6 +217,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
4436  			return ret;
4437  	}
4438  
4439 +	poll_idle_init(dev);
4440 +
4441  	if ((ret = cpuidle_add_state_sysfs(dev)))
4442  		return ret;
4443  
4444 @@ -232,45 +273,6 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
4445  
4446  EXPORT_SYMBOL_GPL(cpuidle_disable_device);
4447  
4448 -#ifdef CONFIG_ARCH_HAS_CPU_RELAX
4449 -static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
4450 -{
4451 -	ktime_t	t1, t2;
4452 -	s64 diff;
4453 -	int ret;
4454 -
4455 -	t1 = ktime_get();
4456 -	local_irq_enable();
4457 -	while (!need_resched())
4458 -		cpu_relax();
4459 -
4460 -	t2 = ktime_get();
4461 -	diff = ktime_to_us(ktime_sub(t2, t1));
4462 -	if (diff > INT_MAX)
4463 -		diff = INT_MAX;
4464 -
4465 -	ret = (int) diff;
4466 -	return ret;
4467 -}
4468 -
4469 -static void poll_idle_init(struct cpuidle_device *dev)
4470 -{
4471 -	struct cpuidle_state *state = &dev->states[0];
4472 -
4473 -	cpuidle_set_statedata(state, NULL);
4474 -
4475 -	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
4476 -	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
4477 -	state->exit_latency = 0;
4478 -	state->target_residency = 0;
4479 -	state->power_usage = -1;
4480 -	state->flags = CPUIDLE_FLAG_POLL;
4481 -	state->enter = poll_idle;
4482 -}
4483 -#else
4484 -static void poll_idle_init(struct cpuidle_device *dev) {}
4485 -#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
4486 -
4487  /**
4488   * __cpuidle_register_device - internal register function called before register
4489   * and enable routines
4490 @@ -291,8 +293,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
4491  
4492  	init_completion(&dev->kobj_unregister);
4493  
4494 -	poll_idle_init(dev);
4495 -
4496  	/*
4497  	 * cpuidle driver should set the dev->power_specified bit
4498  	 * before registering the device if the driver provides
4499 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
4500 index 2e992bc..8a515ba 100644
4501 --- a/drivers/crypto/padlock-aes.c
4502 +++ b/drivers/crypto/padlock-aes.c
4503 @@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
4504  	if (initial)
4505  		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
4506  			      : "+S" (input), "+D" (output), "+a" (iv)
4507 -			      : "d" (control_word), "b" (key), "c" (count));
4508 +			      : "d" (control_word), "b" (key), "c" (initial));
4509  
4510  	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
4511  		      : "+S" (input), "+D" (output), "+a" (iv)
4512 diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
4513 index 411d5bf..a25f5f6 100644
4514 --- a/drivers/dma/mv_xor.c
4515 +++ b/drivers/dma/mv_xor.c
4516 @@ -449,7 +449,7 @@ mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
4517  static void mv_xor_tasklet(unsigned long data)
4518  {
4519  	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
4520 -	__mv_xor_slot_cleanup(chan);
4521 +	mv_xor_slot_cleanup(chan);
4522  }
4523  
4524  static struct mv_xor_desc_slot *
4525 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
4526 index e7d5d6b..33780d8 100644
4527 --- a/drivers/edac/amd64_edac.c
4528 +++ b/drivers/edac/amd64_edac.c
4529 @@ -1572,7 +1572,7 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
4530  	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
4531  			hole_off, hole_valid, intlv_sel);
4532  
4533 -	if (intlv_en ||
4534 +	if (intlv_en &&
4535  	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
4536  		return -EINVAL;
4537  
4538 diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
4539 index 6b21e25..6d2e34d 100644
4540 --- a/drivers/edac/edac_mc.c
4541 +++ b/drivers/edac/edac_mc.c
4542 @@ -578,14 +578,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
4543  		return NULL;
4544  	}
4545  
4546 -	/* marking MCI offline */
4547 -	mci->op_state = OP_OFFLINE;
4548 -
4549  	del_mc_from_global_list(mci);
4550  	mutex_unlock(&mem_ctls_mutex);
4551  
4552 -	/* flush workq processes and remove sysfs */
4553 +	/* flush workq processes */
4554  	edac_mc_workq_teardown(mci);
4555 +
4556 +	/* marking MCI offline */
4557 +	mci->op_state = OP_OFFLINE;
4558 +
4559 +	/* remove from sysfs */
4560  	edac_remove_sysfs_mci_device(mci);
4561  
4562  	edac_printk(KERN_INFO, EDAC_MC,
4563 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
4564 index be04923..24ff355 100644
4565 --- a/drivers/firewire/core-card.c
4566 +++ b/drivers/firewire/core-card.c
4567 @@ -75,6 +75,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
4568  #define BIB_IRMC		((1) << 31)
4569  #define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
4570  
4571 +#define CANON_OUI		0x000085
4572 +
4573  static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
4574  {
4575  	struct fw_descriptor *desc;
4576 @@ -284,6 +286,7 @@ static void bm_work(struct work_struct *work)
4577  	bool root_device_is_running;
4578  	bool root_device_is_cmc;
4579  	bool irm_is_1394_1995_only;
4580 +	bool keep_this_irm;
4581  
4582  	spin_lock_irq(&card->lock);
4583  
4584 @@ -305,6 +308,10 @@ static void bm_work(struct work_struct *work)
4585  	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
4586  			(irm_device->config_rom[2] & 0x000000f0) == 0;
4587  
4588 +	/* Canon MV5i works unreliably if it is not root node. */
4589 +	keep_this_irm = irm_device && irm_device->config_rom &&
4590 +			irm_device->config_rom[3] >> 8 == CANON_OUI;
4591 +
4592  	root_id  = root_node->node_id;
4593  	irm_id   = card->irm_node->node_id;
4594  	local_id = card->local_node->node_id;
4595 @@ -333,7 +340,7 @@ static void bm_work(struct work_struct *work)
4596  			goto pick_me;
4597  		}
4598  
4599 -		if (irm_is_1394_1995_only) {
4600 +		if (irm_is_1394_1995_only && !keep_this_irm) {
4601  			new_root_id = local_id;
4602  			fw_notify("%s, making local node (%02x) root.\n",
4603  				  "IRM is not 1394a compliant", new_root_id);
4604 @@ -382,7 +389,7 @@ static void bm_work(struct work_struct *work)
4605  
4606  		spin_lock_irq(&card->lock);
4607  
4608 -		if (rcode != RCODE_COMPLETE) {
4609 +		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
4610  			/*
4611  			 * The lock request failed, maybe the IRM
4612  			 * isn't really IRM capable after all. Let's
4613 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
4614 index 9dcb17d..a03cb6a 100644
4615 --- a/drivers/firewire/ohci.c
4616 +++ b/drivers/firewire/ohci.c
4617 @@ -242,6 +242,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
4618  
4619  static char ohci_driver_name[] = KBUILD_MODNAME;
4620  
4621 +#define PCI_DEVICE_ID_AGERE_FW643	0x5901
4622  #define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
4623  #define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
4624  
4625 @@ -253,18 +254,34 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
4626  
4627  /* In case of multiple matches in ohci_quirks[], only the first one is used. */
4628  static const struct {
4629 -	unsigned short vendor, device, flags;
4630 +	unsigned short vendor, device, revision, flags;
4631  } ohci_quirks[] = {
4632 -	{PCI_VENDOR_ID_TI,	PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
4633 -							    QUIRK_RESET_PACKET |
4634 -							    QUIRK_NO_1394A},
4635 -	{PCI_VENDOR_ID_TI,	PCI_ANY_ID,	QUIRK_RESET_PACKET},
4636 -	{PCI_VENDOR_ID_AL,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
4637 -	{PCI_VENDOR_ID_JMICRON,	PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
4638 -	{PCI_VENDOR_ID_NEC,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
4639 -	{PCI_VENDOR_ID_VIA,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
4640 -	{PCI_VENDOR_ID_RICOH,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
4641 -	{PCI_VENDOR_ID_APPLE,	PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
4642 +	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
4643 +		QUIRK_CYCLE_TIMER},
4644 +
4645 +	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
4646 +		QUIRK_BE_HEADERS},
4647 +
4648 +	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
4649 +		QUIRK_NO_MSI},
4650 +
4651 +	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
4652 +		QUIRK_NO_MSI},
4653 +
4654 +	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
4655 +		QUIRK_CYCLE_TIMER},
4656 +
4657 +	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
4658 +		QUIRK_CYCLE_TIMER},
4659 +
4660 +	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
4661 +		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
4662 +
4663 +	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
4664 +		QUIRK_RESET_PACKET},
4665 +
4666 +	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
4667 +		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
4668  };
4669  
4670  /* This overrides anything that was found in ohci_quirks[]. */
4671 @@ -739,7 +756,7 @@ static void ar_context_tasklet(unsigned long data)
4672  	d = &ab->descriptor;
4673  
4674  	if (d->res_count == 0) {
4675 -		size_t size, rest, offset;
4676 +		size_t size, size2, rest, pktsize, size3, offset;
4677  		dma_addr_t start_bus;
4678  		void *start;
4679  
4680 @@ -750,25 +767,61 @@ static void ar_context_tasklet(unsigned long data)
4681  		 */
4682  
4683  		offset = offsetof(struct ar_buffer, data);
4684 -		start = buffer = ab;
4685 +		start = ab;
4686  		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
4687 +		buffer = ab->data;
4688  
4689  		ab = ab->next;
4690  		d = &ab->descriptor;
4691 -		size = buffer + PAGE_SIZE - ctx->pointer;
4692 +		size = start + PAGE_SIZE - ctx->pointer;
4693 +		/* valid buffer data in the next page */
4694  		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
4695 +		/* what actually fits in this page */
4696 +		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
4697  		memmove(buffer, ctx->pointer, size);
4698 -		memcpy(buffer + size, ab->data, rest);
4699 -		ctx->current_buffer = ab;
4700 -		ctx->pointer = (void *) ab->data + rest;
4701 -		end = buffer + size + rest;
4702 +		memcpy(buffer + size, ab->data, size2);
4703 +
4704 +		while (size > 0) {
4705 +			void *next = handle_ar_packet(ctx, buffer);
4706 +			pktsize = next - buffer;
4707 +			if (pktsize >= size) {
4708 +				/*
4709 +				 * We have handled all the data that was
4710 +				 * originally in this page, so we can now
4711 +				 * continue in the next page.
4712 +				 */
4713 +				buffer = next;
4714 +				break;
4715 +			}
4716 +			/* move the next packet to the start of the buffer */
4717 +			memmove(buffer, next, size + size2 - pktsize);
4718 +			size -= pktsize;
4719 +			/* fill up this page again */
4720 +			size3 = min(rest - size2,
4721 +				    (size_t)PAGE_SIZE - offset - size - size2);
4722 +			memcpy(buffer + size + size2,
4723 +			       (void *) ab->data + size2, size3);
4724 +			size2 += size3;
4725 +		}
4726  
4727 -		while (buffer < end)
4728 -			buffer = handle_ar_packet(ctx, buffer);
4729 +		if (rest > 0) {
4730 +			/* handle the packets that are fully in the next page */
4731 +			buffer = (void *) ab->data +
4732 +					(buffer - (start + offset + size));
4733 +			end = (void *) ab->data + rest;
4734 +
4735 +			while (buffer < end)
4736 +				buffer = handle_ar_packet(ctx, buffer);
4737  
4738 -		dma_free_coherent(ohci->card.device, PAGE_SIZE,
4739 -				  start, start_bus);
4740 -		ar_context_add_page(ctx);
4741 +			ctx->current_buffer = ab;
4742 +			ctx->pointer = end;
4743 +
4744 +			dma_free_coherent(ohci->card.device, PAGE_SIZE,
4745 +					  start, start_bus);
4746 +			ar_context_add_page(ctx);
4747 +		} else {
4748 +			ctx->pointer = start + PAGE_SIZE;
4749 +		}
4750  	} else {
4751  		buffer = ctx->pointer;
4752  		ctx->pointer = end =
4753 @@ -2885,9 +2938,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
4754  	}
4755  
4756  	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
4757 -		if (ohci_quirks[i].vendor == dev->vendor &&
4758 -		    (ohci_quirks[i].device == dev->device ||
4759 -		     ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
4760 +		if ((ohci_quirks[i].vendor == dev->vendor) &&
4761 +		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
4762 +		     ohci_quirks[i].device == dev->device) &&
4763 +		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
4764 +		     ohci_quirks[i].revision >= dev->revision)) {
4765  			ohci->quirks = ohci_quirks[i].flags;
4766  			break;
4767  		}
4768 diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
4769 index e23c068..d3e55a0 100644
4770 --- a/drivers/gpio/cs5535-gpio.c
4771 +++ b/drivers/gpio/cs5535-gpio.c
4772 @@ -56,6 +56,29 @@ static struct cs5535_gpio_chip {
4773   * registers, see include/linux/cs5535.h.
4774   */
4775  
4776 +static void errata_outl(struct cs5535_gpio_chip *chip, u32 val,
4777 +		unsigned int reg)
4778 +{
4779 +	unsigned long addr = chip->base + 0x80 + reg;
4780 +
4781 +	/*
4782 +	 * According to the CS5536 errata (#36), after suspend
4783 +	 * a write to the high bank GPIO register will clear all
4784 +	 * non-selected bits; the recommended workaround is a
4785 +	 * read-modify-write operation.
4786 +	 *
4787 +	 * Don't apply this errata to the edge status GPIOs, as writing
4788 +	 * to their lower bits will clear them.
4789 +	 */
4790 +	if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) {
4791 +		if (val & 0xffff)
4792 +			val |= (inl(addr) & 0xffff); /* ignore the high bits */
4793 +		else
4794 +			val |= (inl(addr) ^ (val >> 16));
4795 +	}
4796 +	outl(val, addr);
4797 +}
4798 +
4799  static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
4800  		unsigned int reg)
4801  {
4802 @@ -64,7 +87,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
4803  		outl(1 << offset, chip->base + reg);
4804  	else
4805  		/* high bank register */
4806 -		outl(1 << (offset - 16), chip->base + 0x80 + reg);
4807 +		errata_outl(chip, 1 << (offset - 16), reg);
4808  }
4809  
4810  void cs5535_gpio_set(unsigned offset, unsigned int reg)
4811 @@ -86,7 +109,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
4812  		outl(1 << (offset + 16), chip->base + reg);
4813  	else
4814  		/* high bank register */
4815 -		outl(1 << offset, chip->base + 0x80 + reg);
4816 +		errata_outl(chip, 1 << offset, reg);
4817  }
4818  
4819  void cs5535_gpio_clear(unsigned offset, unsigned int reg)
4820 diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
4821 index 2762698..897e057 100644
4822 --- a/drivers/gpio/rdc321x-gpio.c
4823 +++ b/drivers/gpio/rdc321x-gpio.c
4824 @@ -135,7 +135,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
4825  	struct rdc321x_gpio *rdc321x_gpio_dev;
4826  	struct rdc321x_gpio_pdata *pdata;
4827  
4828 -	pdata = pdev->dev.platform_data;
4829 +	pdata = platform_get_drvdata(pdev);
4830  	if (!pdata) {
4831  		dev_err(&pdev->dev, "no platform data supplied\n");
4832  		return -ENODEV;
4833 diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
4834 index 4cab0c6..aa17594 100644
4835 --- a/drivers/gpu/drm/Kconfig
4836 +++ b/drivers/gpu/drm/Kconfig
4837 @@ -97,7 +97,10 @@ config DRM_I830
4838  config DRM_I915
4839  	tristate "i915 driver"
4840  	depends on AGP_INTEL
4841 +	# we need shmfs for the swappable backing store, and in particular
4842 +	# the shmem_readpage() which depends upon tmpfs
4843  	select SHMEM
4844 +	select TMPFS
4845  	select DRM_KMS_HELPER
4846  	select FB_CFB_FILLRECT
4847  	select FB_CFB_COPYAREA
4848 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
4849 index 37e0b4f..dfc6358 100644
4850 --- a/drivers/gpu/drm/drm_crtc.c
4851 +++ b/drivers/gpu/drm/drm_crtc.c
4852 @@ -156,12 +156,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
4853  	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
4854  	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
4855  	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
4856 -	{ DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
4857 -	{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
4858 -	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
4859 -	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
4860 +	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
4861 +	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
4862 +	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
4863 +	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
4864  	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
4865 -	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
4866 +	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
4867  };
4868  
4869  static struct drm_prop_enum_list drm_encoder_enum_list[] =
4870 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
4871 index dcbeb98..ae9fb7a 100644
4872 --- a/drivers/gpu/drm/drm_crtc_helper.c
4873 +++ b/drivers/gpu/drm/drm_crtc_helper.c
4874 @@ -649,6 +649,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
4875  						      old_fb)) {
4876  				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
4877  					  set->crtc->base.id);
4878 +				set->crtc->fb = old_fb;
4879  				ret = -EINVAL;
4880  				goto fail;
4881  			}
4882 @@ -663,8 +664,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
4883  			set->crtc->fb = set->fb;
4884  		ret = crtc_funcs->mode_set_base(set->crtc,
4885  						set->x, set->y, old_fb);
4886 -		if (ret != 0)
4887 +		if (ret != 0) {
4888 +			set->crtc->fb = old_fb;
4889  			goto fail;
4890 +		}
4891  	}
4892  
4893  	kfree(save_connectors);
4894 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
4895 index 2dd2c93..e6fc48e 100644
4896 --- a/drivers/gpu/drm/i915/i915_dma.c
4897 +++ b/drivers/gpu/drm/i915/i915_dma.c
4898 @@ -34,6 +34,7 @@
4899  #include "i915_drm.h"
4900  #include "i915_drv.h"
4901  #include "i915_trace.h"
4902 +#include "../../../platform/x86/intel_ips.h"
4903  #include <linux/pci.h>
4904  #include <linux/vgaarb.h>
4905  #include <linux/acpi.h>
4906 @@ -1418,9 +1419,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
4907  	if (ret)
4908  		DRM_INFO("failed to find VBIOS tables\n");
4909  
4910 -	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
4911 +	/* If we have > 1 VGA cards, then we need to arbitrate access
4912 +	 * to the common VGA resources.
4913 +	 *
4914 +	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
4915 +	 * then we do not take part in VGA arbitration and the
4916 +	 * vga_client_register() fails with -ENODEV.
4917 +	 */
4918  	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
4919 -	if (ret)
4920 +	if (ret && ret != -ENODEV)
4921  		goto cleanup_ringbuffer;
4922  
4923  	ret = vga_switcheroo_register_client(dev->pdev,
4924 @@ -2047,6 +2054,26 @@ out_unlock:
4925  EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4926  
4927  /**
4928 + * Tells the intel_ips driver that the i915 driver is now loaded, if
4929 + * IPS got loaded first.
4930 + *
4931 + * This awkward dance is so that neither module has to depend on the
4932 + * other in order for IPS to do the appropriate communication of
4933 + * GPU turbo limits to i915.
4934 + */
4935 +static void
4936 +ips_ping_for_i915_load(void)
4937 +{
4938 +	void (*link)(void);
4939 +
4940 +	link = symbol_get(ips_link_to_i915_driver);
4941 +	if (link) {
4942 +		link();
4943 +		symbol_put(ips_link_to_i915_driver);
4944 +	}
4945 +}
4946 +
4947 +/**
4948   * i915_driver_load - setup chip and create an initial config
4949   * @dev: DRM device
4950   * @flags: startup flags
4951 @@ -2234,6 +2261,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
4952  	/* XXX Prevent module unload due to memory corruption bugs. */
4953  	__module_get(THIS_MODULE);
4954  
4955 +	ips_ping_for_i915_load();
4956 +
4957  	return 0;
4958  
4959  out_workqueue_free:
4960 @@ -2306,6 +2335,9 @@ int i915_driver_unload(struct drm_device *dev)
4961  		i915_gem_lastclose(dev);
4962  
4963  		intel_cleanup_overlay(dev);
4964 +
4965 +		if (!I915_NEED_GFX_HWS(dev))
4966 +			i915_free_hws(dev);
4967  	}
4968  
4969  	intel_teardown_mchbar(dev);
4970 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
4971 index 6dbe14c..7792c8f 100644
4972 --- a/drivers/gpu/drm/i915/i915_drv.c
4973 +++ b/drivers/gpu/drm/i915/i915_drv.c
4974 @@ -53,7 +53,7 @@ extern int intel_agp_enabled;
4975  
4976  #define INTEL_VGA_DEVICE(id, info) {		\
4977  	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
4978 -	.class_mask = 0xffff00,			\
4979 +	.class_mask = 0xff0000,			\
4980  	.vendor = 0x8086,			\
4981  	.device = id,				\
4982  	.subvendor = PCI_ANY_ID,		\
4983 @@ -414,6 +414,14 @@ int i965_reset(struct drm_device *dev, u8 flags)
4984  static int __devinit
4985  i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4986  {
4987 +	/* Only bind to function 0 of the device. Early generations
4988 +	 * used function 1 as a placeholder for multi-head. This causes
4989 +	 * us confusion instead, especially on the systems where both
4990 +	 * functions have the same PCI-ID!
4991 +	 */
4992 +	if (PCI_FUNC(pdev->devfn))
4993 +		return -ENODEV;
4994 +
4995  	return drm_get_pci_dev(pdev, ent, &driver);
4996  }
4997  
4998 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
4999 index 744225e..477e4ac 100644
5000 --- a/drivers/gpu/drm/i915/i915_irq.c
5001 +++ b/drivers/gpu/drm/i915/i915_irq.c
5002 @@ -310,6 +310,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
5003  	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
5004  	int ret = IRQ_NONE;
5005  	u32 de_iir, gt_iir, de_ier, pch_iir;
5006 +	u32 hotplug_mask;
5007  	struct drm_i915_master_private *master_priv;
5008  	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
5009  
5010 @@ -325,6 +326,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
5011  	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
5012  		goto done;
5013  
5014 +	if (HAS_PCH_CPT(dev))
5015 +		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
5016 +	else
5017 +		hotplug_mask = SDE_HOTPLUG_MASK;
5018 +
5019  	ret = IRQ_HANDLED;
5020  
5021  	if (dev->primary->master) {
5022 @@ -366,10 +372,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
5023  		drm_handle_vblank(dev, 1);
5024  
5025  	/* check event from PCH */
5026 -	if ((de_iir & DE_PCH_EVENT) &&
5027 -	    (pch_iir & SDE_HOTPLUG_MASK)) {
5028 +	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
5029  		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
5030 -	}
5031  
5032  	if (de_iir & DE_PCU_EVENT) {
5033  		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
5034 @@ -1424,8 +1428,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
5035  	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
5036  			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
5037  	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
5038 -	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
5039 -			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
5040 +	u32 hotplug_mask;
5041  
5042  	dev_priv->irq_mask_reg = ~display_mask;
5043  	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
5044 @@ -1450,6 +1453,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
5045  	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
5046  	(void) I915_READ(GTIER);
5047  
5048 +	if (HAS_PCH_CPT(dev)) {
5049 +		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
5050 +			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
5051 +	} else {
5052 +		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
5053 +			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
5054 +	}
5055 +
5056  	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
5057  	dev_priv->pch_irq_enable_reg = hotplug_mask;
5058  
5059 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
5060 index 4f5e155..7103d24 100644
5061 --- a/drivers/gpu/drm/i915/i915_reg.h
5062 +++ b/drivers/gpu/drm/i915/i915_reg.h
5063 @@ -2551,6 +2551,10 @@
5064  #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
5065  #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
5066  #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
5067 +#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
5068 +				 SDE_PORTD_HOTPLUG_CPT |	\
5069 +				 SDE_PORTC_HOTPLUG_CPT |	\
5070 +				 SDE_PORTB_HOTPLUG_CPT)
5071  
5072  #define SDEISR  0xc4000
5073  #define SDEIMR  0xc4004
5074 @@ -2722,6 +2726,9 @@
5075  #define FDI_RXB_CHICKEN         0xc2010
5076  #define  FDI_RX_PHASE_SYNC_POINTER_ENABLE       (1)
5077  
5078 +#define SOUTH_DSPCLK_GATE_D	0xc2020
5079 +#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
5080 +
5081  /* CPU: FDI_TX */
5082  #define FDI_TXA_CTL             0x60100
5083  #define FDI_TXB_CTL             0x61100
5084 @@ -2946,6 +2953,7 @@
5085  #define  TRANS_DP_10BPC		(1<<9)
5086  #define  TRANS_DP_6BPC		(2<<9)
5087  #define  TRANS_DP_12BPC		(3<<9)
5088 +#define  TRANS_DP_BPC_MASK	(3<<9)
5089  #define  TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
5090  #define  TRANS_DP_VSYNC_ACTIVE_LOW	0
5091  #define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
5092 @@ -2959,10 +2967,11 @@
5093  #define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
5094  #define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
5095  /* SNB B-stepping */
5096 -#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
5097 -#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
5098 -#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
5099 -#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
5100 +#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
5101 +#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
5102 +#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
5103 +#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
5104 +#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
5105  #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
5106  
5107  #endif /* _I915_REG_H_ */
5108 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
5109 index 31f0858..2df5b9a 100644
5110 --- a/drivers/gpu/drm/i915/i915_suspend.c
5111 +++ b/drivers/gpu/drm/i915/i915_suspend.c
5112 @@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
5113  	/* Clock gating state */
5114  	intel_init_clock_gating(dev);
5115  
5116 -	if (HAS_PCH_SPLIT(dev))
5117 +	if (HAS_PCH_SPLIT(dev)) {
5118  		ironlake_enable_drps(dev);
5119 +		intel_init_emon(dev);
5120 +	}
5121  
5122  	/* Cache mode state */
5123  	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
5124 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
5125 index 197d4f3..0f950e7 100644
5126 --- a/drivers/gpu/drm/i915/intel_crt.c
5127 +++ b/drivers/gpu/drm/i915/intel_crt.c
5128 @@ -191,7 +191,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
5129  		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
5130  
5131  	if (turn_off_dac) {
5132 -		I915_WRITE(PCH_ADPA, temp);
5133 +		/* Make sure hotplug is enabled */
5134 +		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
5135  		(void)I915_READ(PCH_ADPA);
5136  	}
5137  
5138 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
5139 index 9792285..932a061 100644
5140 --- a/drivers/gpu/drm/i915/intel_display.c
5141 +++ b/drivers/gpu/drm/i915/intel_display.c
5142 @@ -2044,9 +2044,11 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
5143  
5144  				reg = I915_READ(trans_dp_ctl);
5145  				reg &= ~(TRANS_DP_PORT_SEL_MASK |
5146 -					 TRANS_DP_SYNC_MASK);
5147 +					 TRANS_DP_SYNC_MASK |
5148 +					 TRANS_DP_BPC_MASK);
5149  				reg |= (TRANS_DP_OUTPUT_ENABLE |
5150  					TRANS_DP_ENH_FRAMING);
5151 +				reg |= TRANS_DP_8BPC;
5152  
5153  				if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
5154  				      reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5155 @@ -5674,6 +5676,13 @@ void intel_init_clock_gating(struct drm_device *dev)
5156  		I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
5157  
5158  		/*
5159 +		 * On Ibex Peak and Cougar Point, we need to disable clock
5160 +		 * gating for the panel power sequencer or it will fail to
5161 +		 * start up when no ports are active.
5162 +		 */
5163 +		I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5164 +
5165 +		/*
5166  		 * According to the spec the following bits should be set in
5167  		 * order to enable memory self-refresh
5168  		 * The bit 22/21 of 0x42004
5169 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
5170 index 9ab8708..0aa77f3 100644
5171 --- a/drivers/gpu/drm/i915/intel_dp.c
5172 +++ b/drivers/gpu/drm/i915/intel_dp.c
5173 @@ -425,6 +425,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
5174  	uint16_t address = algo_data->address;
5175  	uint8_t msg[5];
5176  	uint8_t reply[2];
5177 +	unsigned retry;
5178  	int msg_bytes;
5179  	int reply_bytes;
5180  	int ret;
5181 @@ -459,14 +460,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
5182  		break;
5183  	}
5184  
5185 -	for (;;) {
5186 -	  ret = intel_dp_aux_ch(intel_dp,
5187 -				msg, msg_bytes,
5188 -				reply, reply_bytes);
5189 +	for (retry = 0; retry < 5; retry++) {
5190 +		ret = intel_dp_aux_ch(intel_dp,
5191 +				      msg, msg_bytes,
5192 +				      reply, reply_bytes);
5193  		if (ret < 0) {
5194  			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
5195  			return ret;
5196  		}
5197 +
5198 +		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
5199 +		case AUX_NATIVE_REPLY_ACK:
5200 +			/* I2C-over-AUX Reply field is only valid
5201 +			 * when paired with AUX ACK.
5202 +			 */
5203 +			break;
5204 +		case AUX_NATIVE_REPLY_NACK:
5205 +			DRM_DEBUG_KMS("aux_ch native nack\n");
5206 +			return -EREMOTEIO;
5207 +		case AUX_NATIVE_REPLY_DEFER:
5208 +			udelay(100);
5209 +			continue;
5210 +		default:
5211 +			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
5212 +				  reply[0]);
5213 +			return -EREMOTEIO;
5214 +		}
5215 +
5216  		switch (reply[0] & AUX_I2C_REPLY_MASK) {
5217  		case AUX_I2C_REPLY_ACK:
5218  			if (mode == MODE_I2C_READ) {
5219 @@ -474,17 +494,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
5220  			}
5221  			return reply_bytes - 1;
5222  		case AUX_I2C_REPLY_NACK:
5223 -			DRM_DEBUG_KMS("aux_ch nack\n");
5224 +			DRM_DEBUG_KMS("aux_i2c nack\n");
5225  			return -EREMOTEIO;
5226  		case AUX_I2C_REPLY_DEFER:
5227 -			DRM_DEBUG_KMS("aux_ch defer\n");
5228 +			DRM_DEBUG_KMS("aux_i2c defer\n");
5229  			udelay(100);
5230  			break;
5231  		default:
5232 -			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
5233 +			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
5234  			return -EREMOTEIO;
5235  		}
5236  	}
5237 +
5238 +	DRM_ERROR("too many retries, giving up\n");
5239 +	return -EREMOTEIO;
5240  }
5241  
5242  static int
5243 @@ -1070,18 +1093,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
5244  static uint32_t
5245  intel_gen6_edp_signal_levels(uint8_t train_set)
5246  {
5247 -	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
5248 +	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
5249 +					 DP_TRAIN_PRE_EMPHASIS_MASK);
5250 +	switch (signal_levels) {
5251  	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
5252 -		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
5253 +	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
5254 +		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
5255 +	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
5256 +		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
5257  	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
5258 -		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
5259 +	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
5260 +		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
5261  	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
5262 -		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
5263 +	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
5264 +		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
5265  	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
5266 -		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
5267 +	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
5268 +		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
5269  	default:
5270 -		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
5271 -		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
5272 +		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
5273 +			      "0x%x\n", signal_levels);
5274 +		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
5275  	}
5276  }
5277  
5278 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
5279 index 8828b3a..2b16137 100644
5280 --- a/drivers/gpu/drm/i915/intel_drv.h
5281 +++ b/drivers/gpu/drm/i915/intel_drv.h
5282 @@ -250,6 +250,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5283  extern void intel_init_clock_gating(struct drm_device *dev);
5284  extern void ironlake_enable_drps(struct drm_device *dev);
5285  extern void ironlake_disable_drps(struct drm_device *dev);
5286 +extern void intel_init_emon(struct drm_device *dev);
5287  
5288  extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
5289  				      struct drm_gem_object *obj);
5290 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
5291 index 6ec39a8..c3b2208 100644
5292 --- a/drivers/gpu/drm/i915/intel_lvds.c
5293 +++ b/drivers/gpu/drm/i915/intel_lvds.c
5294 @@ -701,6 +701,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
5295  	},
5296  	{
5297  		.callback = intel_no_lvds_dmi_callback,
5298 +		.ident = "AOpen i915GMm-HFS",
5299 +		.matches = {
5300 +			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
5301 +			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
5302 +		},
5303 +	},
5304 +	{
5305 +		.callback = intel_no_lvds_dmi_callback,
5306  		.ident = "Aopen i945GTt-VFA",
5307  		.matches = {
5308  			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
5309 diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
5310 index 1d306a4..7436641 100644
5311 --- a/drivers/gpu/drm/i915/intel_overlay.c
5312 +++ b/drivers/gpu/drm/i915/intel_overlay.c
5313 @@ -1367,6 +1367,12 @@ void intel_setup_overlay(struct drm_device *dev)
5314                          goto out_free_bo;
5315                  }
5316  		overlay->flip_addr = overlay->reg_bo->gtt_offset;
5317 +
5318 +		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
5319 +		if (ret) {
5320 +                        DRM_ERROR("failed to move overlay register bo into the GTT\n");
5321 +                        goto out_unpin_bo;
5322 +                }
5323  	} else {
5324  		ret = i915_gem_attach_phys_object(dev, reg_bo,
5325  						  I915_GEM_PHYS_OVERLAY_REGS,
5326 @@ -1399,6 +1405,8 @@ void intel_setup_overlay(struct drm_device *dev)
5327  	DRM_INFO("initialized overlay support\n");
5328  	return;
5329  
5330 +out_unpin_bo:
5331 +	i915_gem_object_unpin(reg_bo);
5332  out_free_bo:
5333  	drm_gem_object_unreference(reg_bo);
5334  out_free:
5335 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
5336 index ee73e42..b60652b 100644
5337 --- a/drivers/gpu/drm/i915/intel_sdvo.c
5338 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
5339 @@ -1498,10 +1498,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
5340  	if (!intel_sdvo_write_cmd(intel_sdvo,
5341  			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
5342  		return connector_status_unknown;
5343 -	if (intel_sdvo->is_tv) {
5344 -		/* add 30ms delay when the output type is SDVO-TV */
5345 +
5346 +	/* add 30ms delay when the output type might be TV */
5347 +	if (intel_sdvo->caps.output_flags &
5348 +	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
5349  		mdelay(30);
5350 -	}
5351 +
5352  	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
5353  		return connector_status_unknown;
5354  
5355 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
5356 index b1be617..c926d88 100644
5357 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
5358 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
5359 @@ -531,6 +531,12 @@ struct drm_nouveau_private {
5360  	struct work_struct irq_work;
5361  	struct work_struct hpd_work;
5362  
5363 +	struct {
5364 +		spinlock_t lock;
5365 +		uint32_t hpd0_bits;
5366 +		uint32_t hpd1_bits;
5367 +	} hpd_state;
5368 +
5369  	struct list_head vbl_waiting;
5370  
5371  	struct {
5372 diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
5373 index 794b0ee..b62a601 100644
5374 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c
5375 +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
5376 @@ -52,6 +52,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
5377  	if (dev_priv->card_type >= NV_50) {
5378  		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
5379  		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
5380 +		spin_lock_init(&dev_priv->hpd_state.lock);
5381  		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
5382  	}
5383  }
5384 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
5385 index 612fa6d..d967cb6 100644
5386 --- a/drivers/gpu/drm/nouveau/nv50_display.c
5387 +++ b/drivers/gpu/drm/nouveau/nv50_display.c
5388 @@ -1012,11 +1012,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
5389  	struct drm_connector *connector;
5390  	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
5391  	uint32_t unplug_mask, plug_mask, change_mask;
5392 -	uint32_t hpd0, hpd1 = 0;
5393 +	uint32_t hpd0, hpd1;
5394  
5395 -	hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
5396 +	spin_lock_irq(&dev_priv->hpd_state.lock);
5397 +	hpd0 = dev_priv->hpd_state.hpd0_bits;
5398 +	dev_priv->hpd_state.hpd0_bits = 0;
5399 +	hpd1 = dev_priv->hpd_state.hpd1_bits;
5400 +	dev_priv->hpd_state.hpd1_bits = 0;
5401 +	spin_unlock_irq(&dev_priv->hpd_state.lock);
5402 +
5403 +	hpd0 &= nv_rd32(dev, 0xe050);
5404  	if (dev_priv->chipset >= 0x90)
5405 -		hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
5406 +		hpd1 &= nv_rd32(dev, 0xe070);
5407  
5408  	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
5409  	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
5410 @@ -1058,10 +1065,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
5411  			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
5412  	}
5413  
5414 -	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
5415 -	if (dev_priv->chipset >= 0x90)
5416 -		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
5417 -
5418  	drm_helper_hpd_irq_event(dev);
5419  }
5420  
5421 @@ -1072,8 +1075,22 @@ nv50_display_irq_handler(struct drm_device *dev)
5422  	uint32_t delayed = 0;
5423  
5424  	if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
5425 -		if (!work_pending(&dev_priv->hpd_work))
5426 -			queue_work(dev_priv->wq, &dev_priv->hpd_work);
5427 +		uint32_t hpd0_bits, hpd1_bits = 0;
5428 +
5429 +		hpd0_bits = nv_rd32(dev, 0xe054);
5430 +		nv_wr32(dev, 0xe054, hpd0_bits);
5431 +
5432 +		if (dev_priv->chipset >= 0x90) {
5433 +			hpd1_bits = nv_rd32(dev, 0xe074);
5434 +			nv_wr32(dev, 0xe074, hpd1_bits);
5435 +		}
5436 +
5437 +		spin_lock(&dev_priv->hpd_state.lock);
5438 +		dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
5439 +		dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
5440 +		spin_unlock(&dev_priv->hpd_state.lock);
5441 +
5442 +		queue_work(dev_priv->wq, &dev_priv->hpd_work);
5443  	}
5444  
5445  	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
5446 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
5447 index 8e421f6..05efb5b 100644
5448 --- a/drivers/gpu/drm/radeon/atom.c
5449 +++ b/drivers/gpu/drm/radeon/atom.c
5450 @@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
5451  			base += 3;
5452  			break;
5453  		case ATOM_IIO_WRITE:
5454 +			(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
5455  			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
5456  			base += 3;
5457  			break;
5458 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
5459 index cd0290f..e226f47 100644
5460 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
5461 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
5462 @@ -253,7 +253,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
5463  	case DRM_MODE_DPMS_SUSPEND:
5464  	case DRM_MODE_DPMS_OFF:
5465  		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
5466 -		atombios_blank_crtc(crtc, ATOM_ENABLE);
5467 +		if (radeon_crtc->enabled)
5468 +			atombios_blank_crtc(crtc, ATOM_ENABLE);
5469  		if (ASIC_IS_DCE3(rdev))
5470  			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
5471  		atombios_enable_crtc(crtc, ATOM_DISABLE);
5472 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
5473 index 4e7778d..695de9a 100644
5474 --- a/drivers/gpu/drm/radeon/atombios_dp.c
5475 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
5476 @@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
5477  int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
5478  {
5479  	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
5480 -	int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
5481 +	int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
5482  
5483 -	if ((lanes == 0) || (bw == 0))
5484 +	if ((lanes == 0) || (dp_clock == 0))
5485  		return MODE_CLOCK_HIGH;
5486  
5487  	return MODE_OK;
5488 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
5489 index 2f93d46..9e3dd2f 100644
5490 --- a/drivers/gpu/drm/radeon/evergreen.c
5491 +++ b/drivers/gpu/drm/radeon/evergreen.c
5492 @@ -1423,7 +1423,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
5493  static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
5494  {
5495  	struct evergreen_mc_save save;
5496 -	u32 srbm_reset = 0;
5497  	u32 grbm_reset = 0;
5498  
5499  	dev_info(rdev->dev, "GPU softreset \n");
5500 @@ -1462,16 +1461,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
5501  	udelay(50);
5502  	WREG32(GRBM_SOFT_RESET, 0);
5503  	(void)RREG32(GRBM_SOFT_RESET);
5504 -
5505 -	/* reset all the system blocks */
5506 -	srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
5507 -
5508 -	dev_info(rdev->dev, "  SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
5509 -	WREG32(SRBM_SOFT_RESET, srbm_reset);
5510 -	(void)RREG32(SRBM_SOFT_RESET);
5511 -	udelay(50);
5512 -	WREG32(SRBM_SOFT_RESET, 0);
5513 -	(void)RREG32(SRBM_SOFT_RESET);
5514  	/* Wait a little for things to settle down */
5515  	udelay(50);
5516  	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
5517 @@ -1482,10 +1471,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
5518  		RREG32(GRBM_STATUS_SE1));
5519  	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
5520  		RREG32(SRBM_STATUS));
5521 -	/* After reset we need to reinit the asic as GPU often endup in an
5522 -	 * incoherent state.
5523 -	 */
5524 -	atom_asic_init(rdev->mode_info.atom_context);
5525  	evergreen_mc_resume(rdev, &save);
5526  	return 0;
5527  }
5528 @@ -2097,6 +2082,11 @@ int evergreen_resume(struct radeon_device *rdev)
5529  {
5530  	int r;
5531  
5532 +	/* reset the asic, the gfx blocks are often in a bad state
5533 +	 * after the driver is unloaded or after a resume
5534 +	 */
5535 +	if (radeon_asic_reset(rdev))
5536 +		dev_warn(rdev->dev, "GPU reset failed !\n");
5537  	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
5538  	 * posting will perform necessary task to bring back GPU into good
5539  	 * shape.
5540 @@ -2193,6 +2183,11 @@ int evergreen_init(struct radeon_device *rdev)
5541  	r = radeon_atombios_init(rdev);
5542  	if (r)
5543  		return r;
5544 +	/* reset the asic, the gfx blocks are often in a bad state
5545 +	 * after the driver is unloaded or after a resume
5546 +	 */
5547 +	if (radeon_asic_reset(rdev))
5548 +		dev_warn(rdev->dev, "GPU reset failed !\n");
5549  	/* Post card if necessary */
5550  	if (!evergreen_card_posted(rdev)) {
5551  		if (!rdev->bios) {
5552 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
5553 index e594223..0ba4163 100644
5554 --- a/drivers/gpu/drm/radeon/r100.c
5555 +++ b/drivers/gpu/drm/radeon/r100.c
5556 @@ -2318,6 +2318,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
5557  		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 
5558  		 * Novell bug 204882 + along with lots of ubuntu ones
5559  		 */
5560 +		if (rdev->mc.aper_size > config_aper_size)
5561 +			config_aper_size = rdev->mc.aper_size;
5562 +
5563  		if (config_aper_size > rdev->mc.real_vram_size)
5564  			rdev->mc.mc_vram_size = config_aper_size;
5565  		else
5566 @@ -3225,6 +3228,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
5567  	for (u = 0; u < track->num_texture; u++) {
5568  		if (!track->textures[u].enabled)
5569  			continue;
5570 +		if (track->textures[u].lookup_disable)
5571 +			continue;
5572  		robj = track->textures[u].robj;
5573  		if (robj == NULL) {
5574  			DRM_ERROR("No texture bound to unit %u\n", u);
5575 @@ -3459,6 +3464,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
5576  		track->textures[i].robj = NULL;
5577  		/* CS IB emission code makes sure texture unit are disabled */
5578  		track->textures[i].enabled = false;
5579 +		track->textures[i].lookup_disable = false;
5580  		track->textures[i].roundup_w = true;
5581  		track->textures[i].roundup_h = true;
5582  		if (track->separate_cube)
5583 diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
5584 index f47cdca..af65600 100644
5585 --- a/drivers/gpu/drm/radeon/r100_track.h
5586 +++ b/drivers/gpu/drm/radeon/r100_track.h
5587 @@ -46,6 +46,7 @@ struct r100_cs_track_texture {
5588  	unsigned		height_11;
5589  	bool			use_pitch;
5590  	bool			enabled;
5591 +	bool                    lookup_disable;
5592  	bool			roundup_w;
5593  	bool			roundup_h;
5594  	unsigned                compress_format;
5595 diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
5596 index 0266d72..d2408c3 100644
5597 --- a/drivers/gpu/drm/radeon/r200.c
5598 +++ b/drivers/gpu/drm/radeon/r200.c
5599 @@ -447,6 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
5600  			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
5601  			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
5602  		}
5603 +		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
5604 +			track->textures[i].lookup_disable = true;
5605  		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
5606  		case R200_TXFORMAT_I8:
5607  		case R200_TXFORMAT_RGB332:
5608 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
5609 index 7b65e4ef..74b9fb7 100644
5610 --- a/drivers/gpu/drm/radeon/r600.c
5611 +++ b/drivers/gpu/drm/radeon/r600.c
5612 @@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
5613  {
5614  	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
5615  		ASIC_T_SHIFT;
5616 -	u32 actual_temp = 0;
5617  
5618 -	if ((temp >> 7) & 1)
5619 -		actual_temp = 0;
5620 -	else
5621 -		actual_temp = (temp >> 1) & 0xff;
5622 -
5623 -	return actual_temp * 1000;
5624 +	return temp * 1000;
5625  }
5626  
5627  void r600_pm_get_dynpm_state(struct radeon_device *rdev)
5628 @@ -884,12 +878,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
5629  	u32 tmp;
5630  
5631  	/* flush hdp cache so updates hit vram */
5632 -	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
5633 +	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
5634 +	    !(rdev->flags & RADEON_IS_AGP)) {
5635  		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
5636  		u32 tmp;
5637  
5638  		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
5639  		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
5640 +		 * This seems to cause problems on some AGP cards. Just use the old
5641 +		 * method for them.
5642  		 */
5643  		WREG32(HDP_DEBUG1, 0);
5644  		tmp = readl((void __iomem *)ptr);
5645 @@ -1201,8 +1198,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
5646  				mc->vram_end, mc->real_vram_size >> 20);
5647  	} else {
5648  		u64 base = 0;
5649 -		if (rdev->flags & RADEON_IS_IGP)
5650 -			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
5651 +		if (rdev->flags & RADEON_IS_IGP) {
5652 +			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
5653 +			base <<= 24;
5654 +		}
5655  		radeon_vram_location(rdev, &rdev->mc, base);
5656  		rdev->mc.gtt_base_align = 0;
5657  		radeon_gtt_location(rdev, mc);
5658 @@ -1608,8 +1607,11 @@ void r600_gpu_init(struct radeon_device *rdev)
5659  	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
5660  	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
5661  	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
5662 -	tiling_config |= GROUP_SIZE(0);
5663 -	rdev->config.r600.tiling_group_size = 256;
5664 +	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
5665 +	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
5666 +		rdev->config.r600.tiling_group_size = 512;
5667 +	else
5668 +		rdev->config.r600.tiling_group_size = 256;
5669  	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
5670  	if (tmp > 3) {
5671  		tiling_config |= ROW_TILING(3);
5672 @@ -3528,10 +3530,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
5673  void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
5674  {
5675  	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
5676 -	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
5677 +	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
5678 +	 * This seems to cause problems on some AGP cards. Just use the old
5679 +	 * method for them.
5680  	 */
5681  	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
5682 -	    rdev->vram_scratch.ptr) {
5683 +	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
5684  		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
5685  		u32 tmp;
5686  
5687 diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
5688 index 3473c00..e5d4928 100644
5689 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c
5690 +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
5691 @@ -650,8 +650,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
5692  			int src_x = src_gpu_addr & 255;
5693  			int dst_x = dst_gpu_addr & 255;
5694  			int h = 1;
5695 -			src_gpu_addr = src_gpu_addr & ~255;
5696 -			dst_gpu_addr = dst_gpu_addr & ~255;
5697 +			src_gpu_addr = src_gpu_addr & ~255ULL;
5698 +			dst_gpu_addr = dst_gpu_addr & ~255ULL;
5699  
5700  			if (!src_x && !dst_x) {
5701  				h = (cur_size / max_bytes);
5702 @@ -744,8 +744,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
5703  			int src_x = (src_gpu_addr & 255);
5704  			int dst_x = (dst_gpu_addr & 255);
5705  			int h = 1;
5706 -			src_gpu_addr = src_gpu_addr & ~255;
5707 -			dst_gpu_addr = dst_gpu_addr & ~255;
5708 +			src_gpu_addr = src_gpu_addr & ~255ULL;
5709 +			dst_gpu_addr = dst_gpu_addr & ~255ULL;
5710  
5711  			if (!src_x && !dst_x) {
5712  				h = (cur_size / max_bytes);
5713 diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
5714 index 250a3a9..478fddf 100644
5715 --- a/drivers/gpu/drm/radeon/r600_cs.c
5716 +++ b/drivers/gpu/drm/radeon/r600_cs.c
5717 @@ -228,7 +228,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
5718  				__func__, __LINE__, pitch);
5719  			return -EINVAL;
5720  		}
5721 -		if (!IS_ALIGNED((height / 8), track->nbanks)) {
5722 +		if (!IS_ALIGNED((height / 8), track->npipes)) {
5723  			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
5724  				 __func__, __LINE__, height);
5725  			return -EINVAL;
5726 @@ -367,7 +367,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
5727  						 __func__, __LINE__, pitch);
5728  					return -EINVAL;
5729  				}
5730 -				if ((height / 8) & (track->nbanks - 1)) {
5731 +				if (!IS_ALIGNED((height / 8), track->npipes)) {
5732  					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
5733  						 __func__, __LINE__, height);
5734  					return -EINVAL;
5735 diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
5736 index d84612a..33cda01 100644
5737 --- a/drivers/gpu/drm/radeon/r600_reg.h
5738 +++ b/drivers/gpu/drm/radeon/r600_reg.h
5739 @@ -86,6 +86,7 @@
5740  #define R600_HDP_NONSURFACE_BASE                                0x2c04
5741  
5742  #define R600_BUS_CNTL                                           0x5420
5743 +#       define R600_BIOS_ROM_DIS                                (1 << 1)
5744  #define R600_CONFIG_CNTL                                        0x5424
5745  #define R600_CONFIG_MEMSIZE                                     0x5428
5746  #define R600_CONFIG_F0_BASE                                     0x542C
5747 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
5748 index 8e43dda..85a0d9f 100644
5749 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
5750 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
5751 @@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
5752  				}
5753  			}
5754  
5755 +			/* some DCE3 boards have bad data for this entry */
5756 +			if (ASIC_IS_DCE3(rdev)) {
5757 +				if ((i == 4) &&
5758 +				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
5759 +				    (gpio->sucI2cId.ucAccess == 0x94))
5760 +					gpio->sucI2cId.ucAccess = 0x14;
5761 +			}
5762 +
5763  			if (gpio->sucI2cId.ucAccess == id) {
5764  				i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
5765  				i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
5766 @@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
5767  				}
5768  			}
5769  
5770 +			/* some DCE3 boards have bad data for this entry */
5771 +			if (ASIC_IS_DCE3(rdev)) {
5772 +				if ((i == 4) &&
5773 +				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
5774 +				    (gpio->sucI2cId.ucAccess == 0x94))
5775 +					gpio->sucI2cId.ucAccess = 0x14;
5776 +			}
5777 +
5778  			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
5779  			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
5780  			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
5781 @@ -297,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
5782  				     uint16_t *line_mux,
5783  				     struct radeon_hpd *hpd)
5784  {
5785 -	struct radeon_device *rdev = dev->dev_private;
5786  
5787  	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
5788  	if ((dev->pdev->device == 0x791e) &&
5789 @@ -372,6 +387,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
5790  			*line_mux = 0x90;
5791  	}
5792  
5793 +	/* mac rv630, rv730, others */
5794 +	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
5795 +	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
5796 +		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
5797 +		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
5798 +	}
5799 +
5800  	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
5801  	if ((dev->pdev->device == 0x9598) &&
5802  	    (dev->pdev->subsystem_vendor == 0x1043) &&
5803 @@ -409,21 +431,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
5804  		}
5805  	}
5806  
5807 -	/* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
5808 +	/* Acer laptop (Acer TravelMate 5730G) has an HDMI port
5809 +	 * on the laptop and a DVI port on the docking station and
5810 +	 * both share the same encoder, hpd pin, and ddc line.
5811 +	 * So while the bios table is technically correct,
5812 +	 * we drop the DVI port here since xrandr has no concept of
5813 +	 * encoders and will try and drive both connectors
5814 +	 * with different crtcs which isn't possible on the hardware
5815 +	 * side and leaves no crtcs for LVDS or VGA.
5816 +	 */
5817  	if ((dev->pdev->device == 0x95c4) &&
5818  	    (dev->pdev->subsystem_vendor == 0x1025) &&
5819  	    (dev->pdev->subsystem_device == 0x013c)) {
5820 -		struct radeon_gpio_rec gpio;
5821 -
5822  		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
5823  		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
5824 -			gpio = radeon_lookup_gpio(rdev, 6);
5825 -			*hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
5826 +			/* actually it's a DVI-D port not DVI-I */
5827  			*connector_type = DRM_MODE_CONNECTOR_DVID;
5828 -		} else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
5829 -			   (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
5830 -			gpio = radeon_lookup_gpio(rdev, 7);
5831 -			*hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
5832 +			return false;
5833  		}
5834  	}
5835  
5836 @@ -2279,7 +2303,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
5837  	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
5838  
5839  	/* tell the bios not to handle mode switching */
5840 -	bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
5841 +	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
5842  
5843  	if (rdev->family >= CHIP_R600) {
5844  		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
5845 @@ -2330,10 +2354,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
5846  	else
5847  		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
5848  
5849 -	if (lock)
5850 +	if (lock) {
5851  		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
5852 -	else
5853 +		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
5854 +	} else {
5855  		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
5856 +		bios_6_scratch |= ATOM_S6_ACC_MODE;
5857 +	}
5858  
5859  	if (rdev->family >= CHIP_R600)
5860  		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
5861 diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
5862 index 654787e..8f2c7b5 100644
5863 --- a/drivers/gpu/drm/radeon/radeon_bios.c
5864 +++ b/drivers/gpu/drm/radeon/radeon_bios.c
5865 @@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
5866  	}
5867  	return true;
5868  }
5869 +
5870  static bool r700_read_disabled_bios(struct radeon_device *rdev)
5871  {
5872  	uint32_t viph_control;
5873 @@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
5874  	bool r;
5875  
5876  	viph_control = RREG32(RADEON_VIPH_CONTROL);
5877 -	bus_cntl = RREG32(RADEON_BUS_CNTL);
5878 +	bus_cntl = RREG32(R600_BUS_CNTL);
5879  	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
5880  	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
5881  	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
5882 @@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
5883  	/* disable VIP */
5884  	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
5885  	/* enable the rom */
5886 -	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
5887 +	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
5888  	/* Disable VGA mode */
5889  	WREG32(AVIVO_D1VGA_CONTROL,
5890  	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
5891 @@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
5892  			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
5893  	}
5894  	WREG32(RADEON_VIPH_CONTROL, viph_control);
5895 -	WREG32(RADEON_BUS_CNTL, bus_cntl);
5896 +	WREG32(R600_BUS_CNTL, bus_cntl);
5897  	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
5898  	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
5899  	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
5900 @@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
5901  	bool r;
5902  
5903  	viph_control = RREG32(RADEON_VIPH_CONTROL);
5904 -	bus_cntl = RREG32(RADEON_BUS_CNTL);
5905 +	bus_cntl = RREG32(R600_BUS_CNTL);
5906  	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
5907  	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
5908  	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
5909 @@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
5910  	/* disable VIP */
5911  	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
5912  	/* enable the rom */
5913 -	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
5914 +	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
5915  	/* Disable VGA mode */
5916  	WREG32(AVIVO_D1VGA_CONTROL,
5917  	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
5918 @@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
5919  
5920  	/* restore regs */
5921  	WREG32(RADEON_VIPH_CONTROL, viph_control);
5922 -	WREG32(RADEON_BUS_CNTL, bus_cntl);
5923 +	WREG32(R600_BUS_CNTL, bus_cntl);
5924  	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
5925  	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
5926  	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
5927 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
5928 index 7b7ea26..137b807 100644
5929 --- a/drivers/gpu/drm/radeon/radeon_combios.c
5930 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
5931 @@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
5932  	}
5933  
5934  	if (clk_mask && data_mask) {
5935 +		/* system specific masks */
5936  		i2c.mask_clk_mask = clk_mask;
5937  		i2c.mask_data_mask = data_mask;
5938  		i2c.a_clk_mask = clk_mask;
5939 @@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
5940  		i2c.en_data_mask = data_mask;
5941  		i2c.y_clk_mask = clk_mask;
5942  		i2c.y_data_mask = data_mask;
5943 +	} else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
5944 +		   (ddc_line == RADEON_MDGPIO_MASK)) {
5945 +		/* default gpiopad masks */
5946 +		i2c.mask_clk_mask = (0x20 << 8);
5947 +		i2c.mask_data_mask = 0x80;
5948 +		i2c.a_clk_mask = (0x20 << 8);
5949 +		i2c.a_data_mask = 0x80;
5950 +		i2c.en_clk_mask = (0x20 << 8);
5951 +		i2c.en_data_mask = 0x80;
5952 +		i2c.y_clk_mask = (0x20 << 8);
5953 +		i2c.y_data_mask = 0x80;
5954  	} else {
5955 +		/* default masks for ddc pads */
5956  		i2c.mask_clk_mask = RADEON_GPIO_EN_1;
5957  		i2c.mask_data_mask = RADEON_GPIO_EN_0;
5958  		i2c.a_clk_mask = RADEON_GPIO_A_1;
5959 @@ -716,7 +729,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
5960  					clk = RBIOS8(offset + 3 + (i * 5) + 3);
5961  					data = RBIOS8(offset + 3 + (i * 5) + 4);
5962  					i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
5963 -								    clk, data);
5964 +								    (1 << clk), (1 << data));
5965  					rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
5966  					break;
5967  				}
5968 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
5969 index ecc1a8f..5e222c9 100644
5970 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
5971 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
5972 @@ -1119,6 +1119,8 @@ radeon_add_atom_connector(struct drm_device *dev,
5973  		/* no HPD on analog connectors */
5974  		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
5975  		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
5976 +		connector->interlace_allowed = true;
5977 +		connector->doublescan_allowed = true;
5978  		break;
5979  	case DRM_MODE_CONNECTOR_DVIA:
5980  		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
5981 @@ -1134,6 +1136,8 @@ radeon_add_atom_connector(struct drm_device *dev,
5982  					      1);
5983  		/* no HPD on analog connectors */
5984  		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
5985 +		connector->interlace_allowed = true;
5986 +		connector->doublescan_allowed = true;
5987  		break;
5988  	case DRM_MODE_CONNECTOR_DVII:
5989  	case DRM_MODE_CONNECTOR_DVID:
5990 @@ -1163,6 +1167,11 @@ radeon_add_atom_connector(struct drm_device *dev,
5991  						      rdev->mode_info.load_detect_property,
5992  						      1);
5993  		}
5994 +		connector->interlace_allowed = true;
5995 +		if (connector_type == DRM_MODE_CONNECTOR_DVII)
5996 +			connector->doublescan_allowed = true;
5997 +		else
5998 +			connector->doublescan_allowed = false;
5999  		break;
6000  	case DRM_MODE_CONNECTOR_HDMIA:
6001  	case DRM_MODE_CONNECTOR_HDMIB:
6002 @@ -1186,6 +1195,11 @@ radeon_add_atom_connector(struct drm_device *dev,
6003  						      rdev->mode_info.underscan_property,
6004  						      UNDERSCAN_AUTO);
6005  		subpixel_order = SubPixelHorizontalRGB;
6006 +		connector->interlace_allowed = true;
6007 +		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
6008 +			connector->doublescan_allowed = true;
6009 +		else
6010 +			connector->doublescan_allowed = false;
6011  		break;
6012  	case DRM_MODE_CONNECTOR_DisplayPort:
6013  	case DRM_MODE_CONNECTOR_eDP:
6014 @@ -1216,6 +1230,9 @@ radeon_add_atom_connector(struct drm_device *dev,
6015  			drm_connector_attach_property(&radeon_connector->base,
6016  						      rdev->mode_info.underscan_property,
6017  						      UNDERSCAN_AUTO);
6018 +		connector->interlace_allowed = true;
6019 +		/* in theory with a DP to VGA converter... */
6020 +		connector->doublescan_allowed = false;
6021  		break;
6022  	case DRM_MODE_CONNECTOR_SVIDEO:
6023  	case DRM_MODE_CONNECTOR_Composite:
6024 @@ -1231,6 +1248,8 @@ radeon_add_atom_connector(struct drm_device *dev,
6025  					      radeon_atombios_get_tv_info(rdev));
6026  		/* no HPD on analog connectors */
6027  		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
6028 +		connector->interlace_allowed = false;
6029 +		connector->doublescan_allowed = false;
6030  		break;
6031  	case DRM_MODE_CONNECTOR_LVDS:
6032  		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
6033 @@ -1249,6 +1268,8 @@ radeon_add_atom_connector(struct drm_device *dev,
6034  					      dev->mode_config.scaling_mode_property,
6035  					      DRM_MODE_SCALE_FULLSCREEN);
6036  		subpixel_order = SubPixelHorizontalRGB;
6037 +		connector->interlace_allowed = false;
6038 +		connector->doublescan_allowed = false;
6039  		break;
6040  	}
6041  
6042 @@ -1326,6 +1347,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
6043  		/* no HPD on analog connectors */
6044  		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
6045  		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
6046 +		connector->interlace_allowed = true;
6047 +		connector->doublescan_allowed = true;
6048  		break;
6049  	case DRM_MODE_CONNECTOR_DVIA:
6050  		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
6051 @@ -1341,6 +1364,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
6052  					      1);
6053  		/* no HPD on analog connectors */
6054  		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
6055 +		connector->interlace_allowed = true;
6056 +		connector->doublescan_allowed = true;
6057  		break;
6058  	case DRM_MODE_CONNECTOR_DVII:
6059  	case DRM_MODE_CONNECTOR_DVID:
6060 @@ -1358,6 +1383,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
6061  						      1);
6062  		}
6063  		subpixel_order = SubPixelHorizontalRGB;
6064 +		connector->interlace_allowed = true;
6065 +		if (connector_type == DRM_MODE_CONNECTOR_DVII)
6066 +			connector->doublescan_allowed = true;
6067 +		else
6068 +			connector->doublescan_allowed = false;
6069  		break;
6070  	case DRM_MODE_CONNECTOR_SVIDEO:
6071  	case DRM_MODE_CONNECTOR_Composite:
6072 @@ -1380,6 +1410,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
6073  					      radeon_combios_get_tv_info(rdev));
6074  		/* no HPD on analog connectors */
6075  		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
6076 +		connector->interlace_allowed = false;
6077 +		connector->doublescan_allowed = false;
6078  		break;
6079  	case DRM_MODE_CONNECTOR_LVDS:
6080  		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
6081 @@ -1393,6 +1425,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
6082  					      dev->mode_config.scaling_mode_property,
6083  					      DRM_MODE_SCALE_FULLSCREEN);
6084  		subpixel_order = SubPixelHorizontalRGB;
6085 +		connector->interlace_allowed = false;
6086 +		connector->doublescan_allowed = false;
6087  		break;
6088  	}
6089  
6090 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
6091 index 256d204..ed5e153 100644
6092 --- a/drivers/gpu/drm/radeon/radeon_device.c
6093 +++ b/drivers/gpu/drm/radeon/radeon_device.c
6094 @@ -829,11 +829,6 @@ int radeon_resume_kms(struct drm_device *dev)
6095  	radeon_pm_resume(rdev);
6096  	radeon_restore_bios_scratch_regs(rdev);
6097  
6098 -	/* turn on display hw */
6099 -	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
6100 -		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
6101 -	}
6102 -
6103  	radeon_fbdev_set_suspend(rdev, 0);
6104  	release_console_sem();
6105  
6106 @@ -841,6 +836,10 @@ int radeon_resume_kms(struct drm_device *dev)
6107  	radeon_hpd_init(rdev);
6108  	/* blat the mode back in */
6109  	drm_helper_resume_force_mode(dev);
6110 +	/* turn on display hw */
6111 +	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
6112 +		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
6113 +	}
6114  	return 0;
6115  }
6116  
6117 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
6118 index b92d2f2..4da243a 100644
6119 --- a/drivers/gpu/drm/radeon/radeon_display.c
6120 +++ b/drivers/gpu/drm/radeon/radeon_display.c
6121 @@ -629,6 +629,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
6122  	*frac_fb_div_p = best_frac_feedback_div;
6123  	*ref_div_p = best_ref_div;
6124  	*post_div_p = best_post_div;
6125 +	DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
6126 +		      freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
6127 +		      best_ref_div, best_post_div);
6128 +
6129  }
6130  
6131  static bool
6132 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
6133 index 2c293e8..b82015e 100644
6134 --- a/drivers/gpu/drm/radeon/radeon_encoders.c
6135 +++ b/drivers/gpu/drm/radeon/radeon_encoders.c
6136 @@ -595,6 +595,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
6137  int
6138  atombios_get_encoder_mode(struct drm_encoder *encoder)
6139  {
6140 +	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
6141  	struct drm_device *dev = encoder->dev;
6142  	struct radeon_device *rdev = dev->dev_private;
6143  	struct drm_connector *connector;
6144 @@ -602,9 +603,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
6145  	struct radeon_connector_atom_dig *dig_connector;
6146  
6147  	connector = radeon_get_connector_for_encoder(encoder);
6148 -	if (!connector)
6149 -		return 0;
6150 -
6151 +	if (!connector) {
6152 +		switch (radeon_encoder->encoder_id) {
6153 +		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
6154 +		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
6155 +		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
6156 +		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
6157 +		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
6158 +			return ATOM_ENCODER_MODE_DVI;
6159 +		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
6160 +		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
6161 +		default:
6162 +			return ATOM_ENCODER_MODE_CRT;
6163 +		}
6164 +	}
6165  	radeon_connector = to_radeon_connector(connector);
6166  
6167  	switch (connector->connector_type) {
6168 @@ -1547,6 +1559,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
6169  	struct radeon_device *rdev = dev->dev_private;
6170  	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
6171  	struct radeon_encoder_atom_dig *dig;
6172 +
6173 +	/* check for pre-DCE3 cards with shared encoders;
6174 +	 * can't really use the links individually, so don't disable
6175 +	 * the encoder if it's in use by another connector
6176 +	 */
6177 +	if (!ASIC_IS_DCE3(rdev)) {
6178 +		struct drm_encoder *other_encoder;
6179 +		struct radeon_encoder *other_radeon_encoder;
6180 +
6181 +		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
6182 +			other_radeon_encoder = to_radeon_encoder(other_encoder);
6183 +			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
6184 +			    drm_helper_encoder_in_use(other_encoder))
6185 +				goto disable_done;
6186 +		}
6187 +	}
6188 +
6189  	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
6190  
6191  	switch (radeon_encoder->encoder_id) {
6192 @@ -1586,6 +1615,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
6193  		break;
6194  	}
6195  
6196 +disable_done:
6197  	if (radeon_encoder_is_digital(encoder)) {
6198  		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
6199  			r600_hdmi_disable(encoder);
6200 diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
6201 index 6a13ee3..acae80e 100644
6202 --- a/drivers/gpu/drm/radeon/radeon_i2c.c
6203 +++ b/drivers/gpu/drm/radeon/radeon_i2c.c
6204 @@ -946,6 +946,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
6205  	i2c->rec = *rec;
6206  	i2c->adapter.owner = THIS_MODULE;
6207  	i2c->dev = dev;
6208 +	sprintf(i2c->adapter.name, "Radeon aux bus %s", name);
6209  	i2c_set_adapdata(&i2c->adapter, i2c);
6210  	i2c->adapter.algo_data = &i2c->algo.dp;
6211  	i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
6212 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
6213 index b3b5306..25d345e 100644
6214 --- a/drivers/gpu/drm/radeon/radeon_object.c
6215 +++ b/drivers/gpu/drm/radeon/radeon_object.c
6216 @@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
6217  		type = ttm_bo_type_device;
6218  	}
6219  	*bo_ptr = NULL;
6220 +
6221 +retry:
6222  	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
6223  	if (bo == NULL)
6224  		return -ENOMEM;
6225 @@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
6226  	bo->gobj = gobj;
6227  	bo->surface_reg = -1;
6228  	INIT_LIST_HEAD(&bo->list);
6229 -
6230 -retry:
6231  	radeon_ttm_placement_from_domain(bo, domain);
6232  	/* Kernel allocation are uninterruptible */
6233  	mutex_lock(&rdev->vram_mutex);
6234 diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
6235 index c332f46..6492881 100644
6236 --- a/drivers/gpu/drm/radeon/radeon_reg.h
6237 +++ b/drivers/gpu/drm/radeon/radeon_reg.h
6238 @@ -2836,6 +2836,7 @@
6239  #       define R200_TXFORMAT_ST_ROUTE_STQ5	(5 << 24)
6240  #       define R200_TXFORMAT_ST_ROUTE_MASK	(7 << 24)
6241  #       define R200_TXFORMAT_ST_ROUTE_SHIFT	24
6242 +#       define R200_TXFORMAT_LOOKUP_DISABLE	(1 << 27)
6243  #       define R200_TXFORMAT_ALPHA_MASK_ENABLE	(1 << 28)
6244  #       define R200_TXFORMAT_CHROMA_KEY_ENABLE	(1 << 29)
6245  #       define R200_TXFORMAT_CUBIC_MAP_ENABLE		(1 << 30)
6246 diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
6247 index 9490da7..b88353d 100644
6248 --- a/drivers/gpu/drm/radeon/rv770.c
6249 +++ b/drivers/gpu/drm/radeon/rv770.c
6250 @@ -643,10 +643,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
6251  	else
6252  		gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
6253  	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
6254 -
6255 -	gb_tiling_config |= GROUP_SIZE(0);
6256 -	rdev->config.rv770.tiling_group_size = 256;
6257 -
6258 +	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
6259 +	if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
6260 +		rdev->config.rv770.tiling_group_size = 512;
6261 +	else
6262 +		rdev->config.rv770.tiling_group_size = 256;
6263  	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
6264  		gb_tiling_config |= ROW_TILING(3);
6265  		gb_tiling_config |= SAMPLE_SPLIT(3);
6266 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
6267 index f366f96..d7def17 100644
6268 --- a/drivers/gpu/vga/vgaarb.c
6269 +++ b/drivers/gpu/vga/vgaarb.c
6270 @@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
6271  			void (*irq_set_state)(void *cookie, bool state),
6272  			unsigned int (*set_vga_decode)(void *cookie, bool decode))
6273  {
6274 -	int ret = -1;
6275 +	int ret = -ENODEV;
6276  	struct vga_device *vgadev;
6277  	unsigned long flags;
6278  
6279 diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
6280 index 8ca7f65..54b017a 100644
6281 --- a/drivers/hid/hid-egalax.c
6282 +++ b/drivers/hid/hid-egalax.c
6283 @@ -31,7 +31,7 @@ struct egalax_data {
6284  	bool first;		/* is this the first finger in the frame? */
6285  	bool valid;		/* valid finger data, or just placeholder? */
6286  	bool activity;		/* at least one active finger previously? */
6287 -	__u16 lastx, lasty;	/* latest valid (x, y) in the frame */
6288 +	__u16 lastx, lasty, lastz;	/* latest valid (x, y, z) in the frame */
6289  };
6290  
6291  static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
6292 @@ -79,6 +79,10 @@ static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
6293  		case HID_DG_TIPPRESSURE:
6294  			hid_map_usage(hi, usage, bit, max,
6295  					EV_ABS, ABS_MT_PRESSURE);
6296 +			/* touchscreen emulation */
6297 +			input_set_abs_params(hi->input, ABS_PRESSURE,
6298 +						field->logical_minimum,
6299 +						field->logical_maximum, 0, 0);
6300  			return 1;
6301  		}
6302  		return 0;
6303 @@ -109,8 +113,8 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
6304  	if (td->valid) {
6305  		/* emit multitouch events */
6306  		input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
6307 -		input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
6308 -		input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
6309 +		input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x >> 3);
6310 +		input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y >> 3);
6311  		input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
6312  
6313  		input_mt_sync(input);
6314 @@ -121,6 +125,7 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
6315  		 */
6316  		td->lastx = td->x;
6317  		td->lasty = td->y;
6318 +		td->lastz = td->z;
6319  	}
6320  
6321  	/*
6322 @@ -129,8 +134,9 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
6323  	 * the oldest on the panel, the one we want for single touch
6324  	 */
6325  	if (!td->first && td->activity) {
6326 -		input_event(input, EV_ABS, ABS_X, td->lastx);
6327 -		input_event(input, EV_ABS, ABS_Y, td->lasty);
6328 +		input_event(input, EV_ABS, ABS_X, td->lastx >> 3);
6329 +		input_event(input, EV_ABS, ABS_Y, td->lasty >> 3);
6330 +		input_event(input, EV_ABS, ABS_PRESSURE, td->lastz);
6331  	}
6332  
6333  	if (!td->valid) {
6334 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
6335 index f0260c6..859ee7e 100644
6336 --- a/drivers/hid/usbhid/hid-quirks.c
6337 +++ b/drivers/hid/usbhid/hid-quirks.c
6338 @@ -34,7 +34,6 @@ static const struct hid_blacklist {
6339  	{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
6340  	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
6341  	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
6342 -	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
6343  	{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
6344  	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
6345  	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
6346 diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
6347 index 65335b2..9975bbf 100644
6348 --- a/drivers/hwmon/adm1026.c
6349 +++ b/drivers/hwmon/adm1026.c
6350 @@ -916,27 +916,27 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
6351  	int nr = sensor_attr->index;
6352  	struct i2c_client *client = to_i2c_client(dev);
6353  	struct adm1026_data *data = i2c_get_clientdata(client);
6354 -	int val, orig_div, new_div, shift;
6355 +	int val, orig_div, new_div;
6356  
6357  	val = simple_strtol(buf, NULL, 10);
6358  	new_div = DIV_TO_REG(val);
6359 -	if (new_div == 0) {
6360 -		return -EINVAL;
6361 -	}
6362 +
6363  	mutex_lock(&data->update_lock);
6364  	orig_div = data->fan_div[nr];
6365  	data->fan_div[nr] = DIV_FROM_REG(new_div);
6366  
6367  	if (nr < 4) { /* 0 <= nr < 4 */
6368 -		shift = 2 * nr;
6369  		adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3,
6370 -			((DIV_TO_REG(orig_div) & (~(0x03 << shift))) |
6371 -			(new_div << shift)));
6372 +				    (DIV_TO_REG(data->fan_div[0]) << 0) |
6373 +				    (DIV_TO_REG(data->fan_div[1]) << 2) |
6374 +				    (DIV_TO_REG(data->fan_div[2]) << 4) |
6375 +				    (DIV_TO_REG(data->fan_div[3]) << 6));
6376  	} else { /* 3 < nr < 8 */
6377 -		shift = 2 * (nr - 4);
6378  		adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7,
6379 -			((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) |
6380 -			(new_div << shift)));
6381 +				    (DIV_TO_REG(data->fan_div[4]) << 0) |
6382 +				    (DIV_TO_REG(data->fan_div[5]) << 2) |
6383 +				    (DIV_TO_REG(data->fan_div[6]) << 4) |
6384 +				    (DIV_TO_REG(data->fan_div[7]) << 6));
6385  	}
6386  
6387  	if (data->fan_div[nr] != orig_div) {
6388 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
6389 index b6598aa..87a5fd51 100644
6390 --- a/drivers/hwmon/applesmc.c
6391 +++ b/drivers/hwmon/applesmc.c
6392 @@ -162,6 +162,10 @@ static const char *temperature_sensors_sets[][41] = {
6393  /* Set 22: MacBook Pro 7,1 */
6394  	{ "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
6395  	  "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
6396 +/* Set 23: MacBook Air 3,1 */
6397 +	{ "TB0T", "TB1T", "TB2T", "TC0D", "TC0E", "TC0P", "TC1E", "TCZ3",
6398 +	  "TCZ4", "TCZ5", "TG0E", "TG1E", "TG2E", "TGZ3", "TGZ4", "TGZ5",
6399 +	  "TH0F", "TH0O", "TM0P" },
6400  };
6401  
6402  /* List of keys used to read/write fan speeds */
6403 @@ -444,38 +448,22 @@ static int applesmc_read_motion_sensor(int index, s16* value)
6404  }
6405  
6406  /*
6407 - * applesmc_device_init - initialize the accelerometer.  Returns zero on success
6408 - * and negative error code on failure.  Can sleep.
6409 + * applesmc_device_init - initialize the accelerometer.  Can sleep.
6410   */
6411 -static int applesmc_device_init(void)
6412 +static void applesmc_device_init(void)
6413  {
6414 -	int total, ret = -ENXIO;
6415 +	int total;
6416  	u8 buffer[2];
6417  
6418  	if (!applesmc_accelerometer)
6419 -		return 0;
6420 +		return;
6421  
6422  	mutex_lock(&applesmc_lock);
6423  
6424  	for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
6425 -		if (debug)
6426 -			printk(KERN_DEBUG "applesmc try %d\n", total);
6427  		if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) &&
6428 -				(buffer[0] != 0x00 || buffer[1] != 0x00)) {
6429 -			if (total == INIT_TIMEOUT_MSECS) {
6430 -				printk(KERN_DEBUG "applesmc: device has"
6431 -						" already been initialized"
6432 -						" (0x%02x, 0x%02x).\n",
6433 -						buffer[0], buffer[1]);
6434 -			} else {
6435 -				printk(KERN_DEBUG "applesmc: device"
6436 -						" successfully initialized"
6437 -						" (0x%02x, 0x%02x).\n",
6438 -						buffer[0], buffer[1]);
6439 -			}
6440 -			ret = 0;
6441 +				(buffer[0] != 0x00 || buffer[1] != 0x00))
6442  			goto out;
6443 -		}
6444  		buffer[0] = 0xe0;
6445  		buffer[1] = 0x00;
6446  		applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2);
6447 @@ -486,7 +474,6 @@ static int applesmc_device_init(void)
6448  
6449  out:
6450  	mutex_unlock(&applesmc_lock);
6451 -	return ret;
6452  }
6453  
6454  /*
6455 @@ -512,13 +499,8 @@ static int applesmc_get_fan_count(void)
6456  /* Device model stuff */
6457  static int applesmc_probe(struct platform_device *dev)
6458  {
6459 -	int ret;
6460 -
6461 -	ret = applesmc_device_init();
6462 -	if (ret)
6463 -		return ret;
6464 +	applesmc_device_init();
6465  
6466 -	printk(KERN_INFO "applesmc: device successfully initialized.\n");
6467  	return 0;
6468  }
6469  
6470 @@ -535,9 +517,7 @@ static int applesmc_pm_resume(struct device *dev)
6471  /* Reinitialize device on resume from hibernation */
6472  static int applesmc_pm_restore(struct device *dev)
6473  {
6474 -	int ret = applesmc_device_init();
6475 -	if (ret)
6476 -		return ret;
6477 +	applesmc_device_init();
6478  	return applesmc_pm_resume(dev);
6479  }
6480  
6481 @@ -1524,11 +1504,17 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
6482  	{ .accelerometer = 1, .light = 1, .temperature_set = 21 },
6483  /* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
6484  	{ .accelerometer = 1, .light = 1, .temperature_set = 22 },
6485 +/* MacBook Air 3,1: accelerometer, backlight and temperature set 23 */
6486 +	{ .accelerometer = 0, .light = 0, .temperature_set = 23 },
6487  };
6488  
6489  /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
6490   * So we need to put "Apple MacBook Pro" before "Apple MacBook". */
6491  static __initdata struct dmi_system_id applesmc_whitelist[] = {
6492 +	{ applesmc_dmi_match, "Apple MacBook Air 3", {
6493 +	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
6494 +	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3") },
6495 +		&applesmc_dmi_data[23]},
6496  	{ applesmc_dmi_match, "Apple MacBook Air 2", {
6497  	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
6498  	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2") },
6499 diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
6500 index 776aeb3..508cb29 100644
6501 --- a/drivers/hwmon/lm63.c
6502 +++ b/drivers/hwmon/lm63.c
6503 @@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
6504   * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
6505   * For remote temperature, low and high limits, it uses signed 11-bit values
6506   * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
6507 + * For LM64 the actual remote diode temperature is 16 degree Celsius higher
6508 + * than the register reading. Remote temperature setpoints have to be
6509 + * adapted accordingly.
6510   */
6511  
6512  #define FAN_FROM_REG(reg)	((reg) == 0xFFFC || (reg) == 0 ? 0 : \
6513 @@ -165,6 +168,8 @@ struct lm63_data {
6514  	struct mutex update_lock;
6515  	char valid; /* zero until following fields are valid */
6516  	unsigned long last_updated; /* in jiffies */
6517 +	int kind;
6518 +	int temp2_offset;
6519  
6520  	/* registers values */
6521  	u8 config, config_fan;
6522 @@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
6523  	return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
6524  }
6525  
6526 -static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
6527 -			  char *buf)
6528 +/*
6529 + * There are 8bit registers for both local(temp1) and remote(temp2) sensor.
6530 + * For remote sensor registers temp2_offset has to be considered,
6531 + * for local sensor it must not.
6532 + * So we need separate 8bit accessors for local and remote sensor.
6533 + */
6534 +static ssize_t show_local_temp8(struct device *dev,
6535 +				struct device_attribute *devattr,
6536 +				char *buf)
6537  {
6538  	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
6539  	struct lm63_data *data = lm63_update_device(dev);
6540  	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
6541  }
6542  
6543 -static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
6544 -			 const char *buf, size_t count)
6545 +static ssize_t show_remote_temp8(struct device *dev,
6546 +				 struct device_attribute *devattr,
6547 +				 char *buf)
6548 +{
6549 +	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
6550 +	struct lm63_data *data = lm63_update_device(dev);
6551 +	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
6552 +		       + data->temp2_offset);
6553 +}
6554 +
6555 +static ssize_t set_local_temp8(struct device *dev,
6556 +			       struct device_attribute *dummy,
6557 +			       const char *buf, size_t count)
6558  {
6559  	struct i2c_client *client = to_i2c_client(dev);
6560  	struct lm63_data *data = i2c_get_clientdata(client);
6561 @@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
6562  {
6563  	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
6564  	struct lm63_data *data = lm63_update_device(dev);
6565 -	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
6566 +	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
6567 +		       + data->temp2_offset);
6568  }
6569  
6570  static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
6571 @@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
6572  	int nr = attr->index;
6573  
6574  	mutex_lock(&data->update_lock);
6575 -	data->temp11[nr] = TEMP11_TO_REG(val);
6576 +	data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
6577  	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
6578  				  data->temp11[nr] >> 8);
6579  	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
6580 @@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
6581  {
6582  	struct lm63_data *data = lm63_update_device(dev);
6583  	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
6584 +		       + data->temp2_offset
6585  		       - TEMP8_FROM_REG(data->temp2_crit_hyst));
6586  }
6587  
6588 @@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
6589  	long hyst;
6590  
6591  	mutex_lock(&data->update_lock);
6592 -	hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
6593 +	hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
6594  	i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
6595  				  HYST_TO_REG(hyst));
6596  	mutex_unlock(&data->update_lock);
6597 @@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
6598  static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
6599  static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
6600  
6601 -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
6602 -static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
6603 -	set_temp8, 1);
6604 +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
6605 +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
6606 +	set_local_temp8, 1);
6607  
6608  static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
6609  static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
6610  	set_temp11, 1);
6611  static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
6612  	set_temp11, 2);
6613 -static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
6614 +/*
6615 + * On LM63, temp2_crit can be set only once, which should be job
6616 + * of the bootloader.
6617 + */
6618 +static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
6619 +	NULL, 2);
6620  static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
6621  	set_temp2_crit_hyst);
6622  
6623 @@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
6624  	data->valid = 0;
6625  	mutex_init(&data->update_lock);
6626  
6627 -	/* Initialize the LM63 chip */
6628 +	/* Set the device type */
6629 +	data->kind = id->driver_data;
6630 +	if (data->kind == lm64)
6631 +		data->temp2_offset = 16000;
6632 +
6633 +	/* Initialize chip */
6634  	lm63_init_client(new_client);
6635  
6636  	/* Register sysfs hooks */
6637 diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
6638 index b3841a6..2e8f0c9 100644
6639 --- a/drivers/hwmon/lm85.c
6640 +++ b/drivers/hwmon/lm85.c
6641 @@ -1259,6 +1259,7 @@ static int lm85_probe(struct i2c_client *client,
6642  	switch (data->type) {
6643  	case adm1027:
6644  	case adt7463:
6645 +	case adt7468:
6646  	case emc6d100:
6647  	case emc6d102:
6648  		data->freq_map = adm1027_freq_map;
6649 diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
6650 index f397ce7..b2074e3 100644
6651 --- a/drivers/hwmon/via686a.c
6652 +++ b/drivers/hwmon/via686a.c
6653 @@ -687,6 +687,13 @@ static int __devexit via686a_remove(struct platform_device *pdev)
6654  	return 0;
6655  }
6656  
6657 +static void via686a_update_fan_div(struct via686a_data *data)
6658 +{
6659 +	int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
6660 +	data->fan_div[0] = (reg >> 4) & 0x03;
6661 +	data->fan_div[1] = reg >> 6;
6662 +}
6663 +
6664  static void __devinit via686a_init_device(struct via686a_data *data)
6665  {
6666  	u8 reg;
6667 @@ -700,6 +707,9 @@ static void __devinit via686a_init_device(struct via686a_data *data)
6668  	via686a_write_value(data, VIA686A_REG_TEMP_MODE,
6669  			    (reg & ~VIA686A_TEMP_MODE_MASK)
6670  			    | VIA686A_TEMP_MODE_CONTINUOUS);
6671 +
6672 +	/* Pre-read fan clock divisor values */
6673 +	via686a_update_fan_div(data);
6674  }
6675  
6676  static struct via686a_data *via686a_update_device(struct device *dev)
6677 @@ -751,9 +761,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
6678  		    (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
6679  		     0xc0) >> 6;
6680  
6681 -		i = via686a_read_value(data, VIA686A_REG_FANDIV);
6682 -		data->fan_div[0] = (i >> 4) & 0x03;
6683 -		data->fan_div[1] = i >> 6;
6684 +		via686a_update_fan_div(data);
6685  		data->alarms =
6686  		    via686a_read_value(data,
6687  				       VIA686A_REG_ALARM1) |
6688 diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
6689 index 5f6d7f8..ace6799 100644
6690 --- a/drivers/i2c/busses/i2c-pca-platform.c
6691 +++ b/drivers/i2c/busses/i2c-pca-platform.c
6692 @@ -224,7 +224,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
6693  
6694  	if (irq) {
6695  		ret = request_irq(irq, i2c_pca_pf_handler,
6696 -			IRQF_TRIGGER_FALLING, i2c->adap.name, i2c);
6697 +			IRQF_TRIGGER_FALLING, pdev->name, i2c);
6698  		if (ret)
6699  			goto e_reqirq;
6700  	}
6701 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
6702 index bea4c50..c16a448 100644
6703 --- a/drivers/i2c/i2c-core.c
6704 +++ b/drivers/i2c/i2c-core.c
6705 @@ -1005,6 +1005,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
6706  static int __unregister_client(struct device *dev, void *dummy)
6707  {
6708  	struct i2c_client *client = i2c_verify_client(dev);
6709 +	if (client && strcmp(client->name, "dummy"))
6710 +		i2c_unregister_device(client);
6711 +	return 0;
6712 +}
6713 +
6714 +static int __unregister_dummy(struct device *dev, void *dummy)
6715 +{
6716 +	struct i2c_client *client = i2c_verify_client(dev);
6717  	if (client)
6718  		i2c_unregister_device(client);
6719  	return 0;
6720 @@ -1059,8 +1067,12 @@ int i2c_del_adapter(struct i2c_adapter *adap)
6721  	mutex_unlock(&adap->userspace_clients_lock);
6722  
6723  	/* Detach any active clients. This can't fail, thus we do not
6724 -	   checking the returned value. */
6725 +	 * check the returned value. This is a two-pass process, because
6726 +	 * we can't remove the dummy devices during the first pass: they
6727 +	 * could have been instantiated by real devices wishing to clean
6728 +	 * them up properly, so we give them a chance to do that first. */
6729  	res = device_for_each_child(&adap->dev, NULL, __unregister_client);
6730 +	res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
6731  
6732  #ifdef CONFIG_I2C_COMPAT
6733  	class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
6734 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
6735 index c37ef64..cf88588 100644
6736 --- a/drivers/idle/intel_idle.c
6737 +++ b/drivers/idle/intel_idle.c
6738 @@ -276,7 +276,7 @@ static int intel_idle_probe(void)
6739  
6740  	case 0x1C:	/* 28 - Atom Processor */
6741  	case 0x26:	/* 38 - Lincroft Atom Processor */
6742 -		lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
6743 +		lapic_timer_reliable_states = (1 << 1); /* C1 */
6744  		cpuidle_state_table = atom_cstates;
6745  		break;
6746  #ifdef FUTURE_USE
6747 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
6748 index 6fcfbeb..abb8714 100644
6749 --- a/drivers/infiniband/core/uverbs_cmd.c
6750 +++ b/drivers/infiniband/core/uverbs_cmd.c
6751 @@ -891,68 +891,81 @@ out:
6752  	return ret ? ret : in_len;
6753  }
6754  
6755 +static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
6756 +{
6757 +	struct ib_uverbs_wc tmp;
6758 +
6759 +	tmp.wr_id		= wc->wr_id;
6760 +	tmp.status		= wc->status;
6761 +	tmp.opcode		= wc->opcode;
6762 +	tmp.vendor_err		= wc->vendor_err;
6763 +	tmp.byte_len		= wc->byte_len;
6764 +	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
6765 +	tmp.qp_num		= wc->qp->qp_num;
6766 +	tmp.src_qp		= wc->src_qp;
6767 +	tmp.wc_flags		= wc->wc_flags;
6768 +	tmp.pkey_index		= wc->pkey_index;
6769 +	tmp.slid		= wc->slid;
6770 +	tmp.sl			= wc->sl;
6771 +	tmp.dlid_path_bits	= wc->dlid_path_bits;
6772 +	tmp.port_num		= wc->port_num;
6773 +	tmp.reserved		= 0;
6774 +
6775 +	if (copy_to_user(dest, &tmp, sizeof tmp))
6776 +		return -EFAULT;
6777 +
6778 +	return 0;
6779 +}
6780 +
6781  ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
6782  			  const char __user *buf, int in_len,
6783  			  int out_len)
6784  {
6785  	struct ib_uverbs_poll_cq       cmd;
6786 -	struct ib_uverbs_poll_cq_resp *resp;
6787 +	struct ib_uverbs_poll_cq_resp  resp;
6788 +	u8 __user                     *header_ptr;
6789 +	u8 __user                     *data_ptr;
6790  	struct ib_cq                  *cq;
6791 -	struct ib_wc                  *wc;
6792 -	int                            ret = 0;
6793 -	int                            i;
6794 -	int                            rsize;
6795 +	struct ib_wc                   wc;
6796 +	int                            ret;
6797  
6798  	if (copy_from_user(&cmd, buf, sizeof cmd))
6799  		return -EFAULT;
6800  
6801 -	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
6802 -	if (!wc)
6803 -		return -ENOMEM;
6804 -
6805 -	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
6806 -	resp = kmalloc(rsize, GFP_KERNEL);
6807 -	if (!resp) {
6808 -		ret = -ENOMEM;
6809 -		goto out_wc;
6810 -	}
6811 -
6812  	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
6813 -	if (!cq) {
6814 -		ret = -EINVAL;
6815 -		goto out;
6816 -	}
6817 +	if (!cq)
6818 +		return -EINVAL;
6819  
6820 -	resp->count = ib_poll_cq(cq, cmd.ne, wc);
6821 +	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
6822 +	header_ptr = (void __user *)(unsigned long) cmd.response;
6823 +	data_ptr = header_ptr + sizeof resp;
6824  
6825 -	put_cq_read(cq);
6826 +	memset(&resp, 0, sizeof resp);
6827 +	while (resp.count < cmd.ne) {
6828 +		ret = ib_poll_cq(cq, 1, &wc);
6829 +		if (ret < 0)
6830 +			goto out_put;
6831 +		if (!ret)
6832 +			break;
6833 +
6834 +		ret = copy_wc_to_user(data_ptr, &wc);
6835 +		if (ret)
6836 +			goto out_put;
6837  
6838 -	for (i = 0; i < resp->count; i++) {
6839 -		resp->wc[i].wr_id 	   = wc[i].wr_id;
6840 -		resp->wc[i].status 	   = wc[i].status;
6841 -		resp->wc[i].opcode 	   = wc[i].opcode;
6842 -		resp->wc[i].vendor_err 	   = wc[i].vendor_err;
6843 -		resp->wc[i].byte_len 	   = wc[i].byte_len;
6844 -		resp->wc[i].ex.imm_data    = (__u32 __force) wc[i].ex.imm_data;
6845 -		resp->wc[i].qp_num 	   = wc[i].qp->qp_num;
6846 -		resp->wc[i].src_qp 	   = wc[i].src_qp;
6847 -		resp->wc[i].wc_flags 	   = wc[i].wc_flags;
6848 -		resp->wc[i].pkey_index 	   = wc[i].pkey_index;
6849 -		resp->wc[i].slid 	   = wc[i].slid;
6850 -		resp->wc[i].sl 		   = wc[i].sl;
6851 -		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
6852 -		resp->wc[i].port_num 	   = wc[i].port_num;
6853 +		data_ptr += sizeof(struct ib_uverbs_wc);
6854 +		++resp.count;
6855  	}
6856  
6857 -	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
6858 +	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
6859  		ret = -EFAULT;
6860 +		goto out_put;
6861 +	}
6862  
6863 -out:
6864 -	kfree(resp);
6865 +	ret = in_len;
6866  
6867 -out_wc:
6868 -	kfree(wc);
6869 -	return ret ? ret : in_len;
6870 +out_put:
6871 +	put_cq_read(cq);
6872 +	return ret;
6873  }
6874  
6875  ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
6876 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
6877 index 32d352a..081d061 100644
6878 --- a/drivers/infiniband/hw/cxgb4/cm.c
6879 +++ b/drivers/infiniband/hw/cxgb4/cm.c
6880 @@ -383,7 +383,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
6881  					  16)) | FW_WR_FLOWID(ep->hwtid));
6882  
6883  	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
6884 -	flowc->mnemval[0].val = cpu_to_be32(0);
6885 +	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
6886  	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
6887  	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
6888  	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
6889 diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
6890 index b952317..ee82851 100644
6891 --- a/drivers/input/mouse/bcm5974.c
6892 +++ b/drivers/input/mouse/bcm5974.c
6893 @@ -55,6 +55,14 @@
6894  #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI	0x0236
6895  #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO	0x0237
6896  #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS	0x0238
6897 +/* MacbookAir3,2 (unibody), aka wellspring5 */
6898 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI	0x023f
6899 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO	0x0240
6900 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS	0x0241
6901 +/* MacbookAir3,1 (unibody), aka wellspring4 */
6902 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI	0x0242
6903 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO	0x0243
6904 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS	0x0244
6905  
6906  #define BCM5974_DEVICE(prod) {					\
6907  	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |		\
6908 @@ -80,6 +88,14 @@ static const struct usb_device_id bcm5974_table[] = {
6909  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
6910  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
6911  	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
6912 +	/* MacbookAir3,2 */
6913 +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
6914 +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
6915 +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
6916 +	/* MacbookAir3,1 */
6917 +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
6918 +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
6919 +	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
6920  	/* Terminating entry */
6921  	{}
6922  };
6923 @@ -234,6 +250,30 @@ static const struct bcm5974_config bcm5974_config_table[] = {
6924  		{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
6925  		{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
6926  	},
6927 +	{
6928 +		USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
6929 +		USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
6930 +		USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
6931 +		HAS_INTEGRATED_BUTTON,
6932 +		0x84, sizeof(struct bt_data),
6933 +		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
6934 +		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
6935 +		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
6936 +		{ DIM_X, DIM_X / SN_COORD, -4620, 5140 },
6937 +		{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
6938 +	},
6939 +	{
6940 +		USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
6941 +		USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
6942 +		USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
6943 +		HAS_INTEGRATED_BUTTON,
6944 +		0x84, sizeof(struct bt_data),
6945 +		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
6946 +		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
6947 +		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
6948 +		{ DIM_X, DIM_X / SN_COORD, -4616, 5112 },
6949 +		{ DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
6950 +	},
6951  	{}
6952  };
6953  
6954 diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
6955 index b6aa7d2..298c8e5 100644
6956 --- a/drivers/input/mouse/synaptics.h
6957 +++ b/drivers/input/mouse/synaptics.h
6958 @@ -51,7 +51,8 @@
6959  #define SYN_EXT_CAP_REQUESTS(c)		(((c) & 0x700000) >> 20)
6960  #define SYN_CAP_MULTI_BUTTON_NO(ec)	(((ec) & 0x00f000) >> 12)
6961  #define SYN_CAP_PRODUCT_ID(ec)		(((ec) & 0xff0000) >> 16)
6962 -#define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100100)
6963 +#define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100000) /* 1-button ClickPad */
6964 +#define SYN_CAP_CLICKPAD2BTN(ex0c)	((ex0c) & 0x000100) /* 2-button ClickPad */
6965  #define SYN_CAP_MAX_DIMENSIONS(ex0c)	((ex0c) & 0x020000)
6966  
6967  /* synaptics modes query bits */
6968 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
6969 index ed7ad74..a0730fd 100644
6970 --- a/drivers/input/serio/i8042-x86ia64io.h
6971 +++ b/drivers/input/serio/i8042-x86ia64io.h
6972 @@ -333,6 +333,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
6973  		},
6974  	},
6975  	{
6976 +		/* Sony Vaio VPCZ122GX */
6977 +		.matches = {
6978 +			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
6979 +			DMI_MATCH(DMI_PRODUCT_NAME, "VPCZ122GX"),
6980 +		},
6981 +	},
6982 +	{
6983  		/* Sony Vaio FS-115b */
6984  		.matches = {
6985  			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
6986 @@ -413,6 +420,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
6987  			DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
6988  		},
6989  	},
6990 +	{
6991 +		/* Dell Vostro V13 */
6992 +		.matches = {
6993 +			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
6994 +			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
6995 +		},
6996 +	},
6997  	{ }
6998  };
6999  
7000 @@ -534,6 +548,17 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
7001  };
7002  #endif
7003  
7004 +static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
7005 +	{
7006 +		/* Dell Vostro V13 */
7007 +		.matches = {
7008 +			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
7009 +			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
7010 +		},
7011 +	},
7012 +	{ }
7013 +};
7014 +
7015  /*
7016   * Some Wistron based laptops need us to explicitly enable the 'Dritek
7017   * keyboard extension' to make their extra keys start generating scancodes.
7018 @@ -886,6 +911,9 @@ static int __init i8042_platform_init(void)
7019  	if (dmi_check_system(i8042_dmi_nomux_table))
7020  		i8042_nomux = true;
7021  
7022 +	if (dmi_check_system(i8042_dmi_notimeout_table))
7023 +		i8042_notimeout = true;
7024 +
7025  	if (dmi_check_system(i8042_dmi_dritek_table))
7026  		i8042_dritek = true;
7027  #endif /* CONFIG_X86 */
7028 diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
7029 index f585131..9e48650 100644
7030 --- a/drivers/input/serio/i8042.c
7031 +++ b/drivers/input/serio/i8042.c
7032 @@ -61,6 +61,10 @@ static bool i8042_noloop;
7033  module_param_named(noloop, i8042_noloop, bool, 0);
7034  MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
7035  
7036 +static bool i8042_notimeout;
7037 +module_param_named(notimeout, i8042_notimeout, bool, 0);
7038 +MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
7039 +
7040  #ifdef CONFIG_X86
7041  static bool i8042_dritek;
7042  module_param_named(dritek, i8042_dritek, bool, 0);
7043 @@ -503,7 +507,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
7044  	} else {
7045  
7046  		dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
7047 -		      ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
7048 +		      ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
7049  
7050  		port_no = (str & I8042_STR_AUXDATA) ?
7051  				I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
7052 diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
7053 index 707d9c9..131976d 100644
7054 --- a/drivers/isdn/gigaset/bas-gigaset.c
7055 +++ b/drivers/isdn/gigaset/bas-gigaset.c
7056 @@ -438,23 +438,27 @@ static void cmd_in_timeout(unsigned long data)
7057  		return;
7058  	}
7059  
7060 -	if (ucs->retry_cmd_in++ < BAS_RETRY) {
7061 -		dev_notice(cs->dev, "control read: timeout, retry %d\n",
7062 -			   ucs->retry_cmd_in);
7063 -		rc = atread_submit(cs, BAS_TIMEOUT);
7064 -		if (rc >= 0 || rc == -ENODEV)
7065 -			/* resubmitted or disconnected */
7066 -			/* - bypass regular exit block */
7067 -			return;
7068 -	} else {
7069 +	if (ucs->retry_cmd_in++ >= BAS_RETRY) {
7070  		dev_err(cs->dev,
7071  			"control read: timeout, giving up after %d tries\n",
7072  			ucs->retry_cmd_in);
7073 +		kfree(ucs->rcvbuf);
7074 +		ucs->rcvbuf = NULL;
7075 +		ucs->rcvbuf_size = 0;
7076 +		error_reset(cs);
7077 +		return;
7078 +	}
7079 +
7080 +	gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
7081 +		__func__, ucs->retry_cmd_in);
7082 +	rc = atread_submit(cs, BAS_TIMEOUT);
7083 +	if (rc < 0) {
7084 +		kfree(ucs->rcvbuf);
7085 +		ucs->rcvbuf = NULL;
7086 +		ucs->rcvbuf_size = 0;
7087 +		if (rc != -ENODEV)
7088 +			error_reset(cs);
7089  	}
7090 -	kfree(ucs->rcvbuf);
7091 -	ucs->rcvbuf = NULL;
7092 -	ucs->rcvbuf_size = 0;
7093 -	error_reset(cs);
7094  }
7095  
7096  /* read_ctrl_callback
7097 @@ -470,18 +474,11 @@ static void read_ctrl_callback(struct urb *urb)
7098  	struct cardstate *cs = inbuf->cs;
7099  	struct bas_cardstate *ucs = cs->hw.bas;
7100  	int status = urb->status;
7101 -	int have_data = 0;
7102  	unsigned numbytes;
7103  	int rc;
7104  
7105  	update_basstate(ucs, 0, BS_ATRDPEND);
7106  	wake_up(&ucs->waitqueue);
7107 -
7108 -	if (!ucs->rcvbuf_size) {
7109 -		dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
7110 -		return;
7111 -	}
7112 -
7113  	del_timer(&ucs->timer_cmd_in);
7114  
7115  	switch (status) {
7116 @@ -495,19 +492,10 @@ static void read_ctrl_callback(struct urb *urb)
7117  				numbytes = ucs->rcvbuf_size;
7118  		}
7119  
7120 -		/* copy received bytes to inbuf */
7121 -		have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
7122 -
7123 -		if (unlikely(numbytes < ucs->rcvbuf_size)) {
7124 -			/* incomplete - resubmit for remaining bytes */
7125 -			ucs->rcvbuf_size -= numbytes;
7126 -			ucs->retry_cmd_in = 0;
7127 -			rc = atread_submit(cs, BAS_TIMEOUT);
7128 -			if (rc >= 0 || rc == -ENODEV)
7129 -				/* resubmitted or disconnected */
7130 -				/* - bypass regular exit block */
7131 -				return;
7132 -			error_reset(cs);
7133 +		/* copy received bytes to inbuf, notify event layer */
7134 +		if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
7135 +			gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
7136 +			gigaset_schedule_event(cs);
7137  		}
7138  		break;
7139  
7140 @@ -516,37 +504,32 @@ static void read_ctrl_callback(struct urb *urb)
7141  	case -EINPROGRESS:		/* pending */
7142  	case -ENODEV:			/* device removed */
7143  	case -ESHUTDOWN:		/* device shut down */
7144 -		/* no action necessary */
7145 +		/* no further action necessary */
7146  		gig_dbg(DEBUG_USBREQ, "%s: %s",
7147  			__func__, get_usb_statmsg(status));
7148  		break;
7149  
7150 -	default:			/* severe trouble */
7151 -		dev_warn(cs->dev, "control read: %s\n",
7152 -			 get_usb_statmsg(status));
7153 +	default:			/* other errors: retry */
7154  		if (ucs->retry_cmd_in++ < BAS_RETRY) {
7155 -			dev_notice(cs->dev, "control read: retry %d\n",
7156 -				   ucs->retry_cmd_in);
7157 +			gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
7158 +				get_usb_statmsg(status), ucs->retry_cmd_in);
7159  			rc = atread_submit(cs, BAS_TIMEOUT);
7160 -			if (rc >= 0 || rc == -ENODEV)
7161 -				/* resubmitted or disconnected */
7162 -				/* - bypass regular exit block */
7163 +			if (rc >= 0)
7164 +				/* successfully resubmitted, skip freeing */
7165  				return;
7166 -		} else {
7167 -			dev_err(cs->dev,
7168 -				"control read: giving up after %d tries\n",
7169 -				ucs->retry_cmd_in);
7170 +			if (rc == -ENODEV)
7171 +				/* disconnect, no further action necessary */
7172 +				break;
7173  		}
7174 +		dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
7175 +			get_usb_statmsg(status), ucs->retry_cmd_in);
7176  		error_reset(cs);
7177  	}
7178  
7179 +	/* read finished, free buffer */
7180  	kfree(ucs->rcvbuf);
7181  	ucs->rcvbuf = NULL;
7182  	ucs->rcvbuf_size = 0;
7183 -	if (have_data) {
7184 -		gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
7185 -		gigaset_schedule_event(cs);
7186 -	}
7187  }
7188  
7189  /* atread_submit
7190 @@ -1598,13 +1581,13 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
7191  
7192  	ret = starturbs(bcs);
7193  	if (ret < 0) {
7194 +		spin_unlock_irqrestore(&cs->lock, flags);
7195  		dev_err(cs->dev,
7196  			"could not start isochronous I/O for channel B%d: %s\n",
7197  			bcs->channel + 1,
7198  			ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
7199  		if (ret != -ENODEV)
7200  			error_hangup(bcs);
7201 -		spin_unlock_irqrestore(&cs->lock, flags);
7202  		return ret;
7203  	}
7204  
7205 @@ -1614,11 +1597,11 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
7206  		dev_err(cs->dev, "could not open channel B%d\n",
7207  			bcs->channel + 1);
7208  		stopurbs(bcs->hw.bas);
7209 -		if (ret != -ENODEV)
7210 -			error_hangup(bcs);
7211  	}
7212  
7213  	spin_unlock_irqrestore(&cs->lock, flags);
7214 +	if (ret < 0 && ret != -ENODEV)
7215 +		error_hangup(bcs);
7216  	return ret;
7217  }
7218  
7219 diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
7220 index 2dfd346..f39ccdf 100644
7221 --- a/drivers/isdn/gigaset/isocdata.c
7222 +++ b/drivers/isdn/gigaset/isocdata.c
7223 @@ -842,13 +842,14 @@ static inline void trans_receive(unsigned char *src, unsigned count,
7224  
7225  	if (unlikely(bcs->ignore)) {
7226  		bcs->ignore--;
7227 -		hdlc_flush(bcs);
7228  		return;
7229  	}
7230  	skb = bcs->rx_skb;
7231 -	if (skb == NULL)
7232 +	if (skb == NULL) {
7233  		skb = gigaset_new_rx_skb(bcs);
7234 -	bcs->hw.bas->goodbytes += skb->len;
7235 +		if (skb == NULL)
7236 +			return;
7237 +	}
7238  	dobytes = bcs->rx_bufsize - skb->len;
7239  	while (count > 0) {
7240  		dst = skb_put(skb, count < dobytes ? count : dobytes);
7241 @@ -860,6 +861,7 @@ static inline void trans_receive(unsigned char *src, unsigned count,
7242  		if (dobytes == 0) {
7243  			dump_bytes(DEBUG_STREAM_DUMP,
7244  				   "rcv data", skb->data, skb->len);
7245 +			bcs->hw.bas->goodbytes += skb->len;
7246  			gigaset_skb_rcvd(bcs, skb);
7247  			skb = gigaset_new_rx_skb(bcs);
7248  			if (skb == NULL)
7249 diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
7250 index a688293..614ebeb 100644
7251 --- a/drivers/leds/leds-ss4200.c
7252 +++ b/drivers/leds/leds-ss4200.c
7253 @@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = {
7254  			DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
7255  		}
7256  	},
7257 +	{}
7258  };
7259  
7260  /*
7261 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
7262 index 487ecda..406091f 100644
7263 --- a/drivers/md/dm-mpath.c
7264 +++ b/drivers/md/dm-mpath.c
7265 @@ -33,7 +33,6 @@ struct pgpath {
7266  	unsigned fail_count;		/* Cumulative failure count */
7267  
7268  	struct dm_path path;
7269 -	struct work_struct deactivate_path;
7270  	struct work_struct activate_path;
7271  };
7272  
7273 @@ -116,7 +115,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
7274  static void process_queued_ios(struct work_struct *work);
7275  static void trigger_event(struct work_struct *work);
7276  static void activate_path(struct work_struct *work);
7277 -static void deactivate_path(struct work_struct *work);
7278  
7279  
7280  /*-----------------------------------------------
7281 @@ -129,7 +127,6 @@ static struct pgpath *alloc_pgpath(void)
7282  
7283  	if (pgpath) {
7284  		pgpath->is_active = 1;
7285 -		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
7286  		INIT_WORK(&pgpath->activate_path, activate_path);
7287  	}
7288  
7289 @@ -141,14 +138,6 @@ static void free_pgpath(struct pgpath *pgpath)
7290  	kfree(pgpath);
7291  }
7292  
7293 -static void deactivate_path(struct work_struct *work)
7294 -{
7295 -	struct pgpath *pgpath =
7296 -		container_of(work, struct pgpath, deactivate_path);
7297 -
7298 -	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
7299 -}
7300 -
7301  static struct priority_group *alloc_priority_group(void)
7302  {
7303  	struct priority_group *pg;
7304 @@ -995,7 +984,6 @@ static int fail_path(struct pgpath *pgpath)
7305  		      pgpath->path.dev->name, m->nr_valid_paths);
7306  
7307  	schedule_work(&m->trigger_event);
7308 -	queue_work(kmultipathd, &pgpath->deactivate_path);
7309  
7310  out:
7311  	spin_unlock_irqrestore(&m->lock, flags);
7312 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
7313 index f9fc07d..87e4e78 100644
7314 --- a/drivers/md/dm-table.c
7315 +++ b/drivers/md/dm-table.c
7316 @@ -1136,11 +1136,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
7317  	 */
7318  	q->limits = *limits;
7319  
7320 -	if (limits->no_cluster)
7321 -		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
7322 -	else
7323 -		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
7324 -
7325  	if (!dm_table_supports_discards(t))
7326  		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
7327  	else
7328 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
7329 index ac384b2..a173db5 100644
7330 --- a/drivers/md/dm.c
7331 +++ b/drivers/md/dm.c
7332 @@ -2111,13 +2111,14 @@ static void event_callback(void *context)
7333  	wake_up(&md->eventq);
7334  }
7335  
7336 +/*
7337 + * Protected by md->suspend_lock obtained by dm_swap_table().
7338 + */
7339  static void __set_size(struct mapped_device *md, sector_t size)
7340  {
7341  	set_capacity(md->disk, size);
7342  
7343 -	mutex_lock(&md->bdev->bd_inode->i_mutex);
7344  	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
7345 -	mutex_unlock(&md->bdev->bd_inode->i_mutex);
7346  }
7347  
7348  /*
7349 diff --git a/drivers/md/md.c b/drivers/md/md.c
7350 index f20d13e..0128597 100644
7351 --- a/drivers/md/md.c
7352 +++ b/drivers/md/md.c
7353 @@ -220,11 +220,14 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
7354  	mddev_t *mddev = q->queuedata;
7355  	int rv;
7356  	int cpu;
7357 +	unsigned int sectors;
7358  
7359 -	if (mddev == NULL || mddev->pers == NULL) {
7360 +	if (mddev == NULL || mddev->pers == NULL
7361 +	    || !mddev->ready) {
7362  		bio_io_error(bio);
7363  		return 0;
7364  	}
7365 +	smp_rmb(); /* Ensure implications of  'active' are visible */
7366  	rcu_read_lock();
7367  	if (mddev->suspended || mddev->barrier) {
7368  		DEFINE_WAIT(__wait);
7369 @@ -242,12 +245,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
7370  	atomic_inc(&mddev->active_io);
7371  	rcu_read_unlock();
7372  
7373 +	/*
7374 +	 * save the sectors now since our bio can
7375 +	 * go away inside make_request
7376 +	 */
7377 +	sectors = bio_sectors(bio);
7378  	rv = mddev->pers->make_request(mddev, bio);
7379  
7380  	cpu = part_stat_lock();
7381  	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
7382 -	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
7383 -		      bio_sectors(bio));
7384 +	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
7385  	part_stat_unlock();
7386  
7387  	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
7388 @@ -1329,7 +1336,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
7389  	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
7390  		       rdev->sb_page);
7391  	md_super_wait(rdev->mddev);
7392 -	return num_sectors / 2; /* kB for sysfs */
7393 +	return num_sectors;
7394  }
7395  
7396  
7397 @@ -1697,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
7398  	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
7399  		       rdev->sb_page);
7400  	md_super_wait(rdev->mddev);
7401 -	return num_sectors / 2; /* kB for sysfs */
7402 +	return num_sectors;
7403  }
7404  
7405  static struct super_type super_types[] = {
7406 @@ -2172,6 +2179,8 @@ repeat:
7407  	if (!mddev->persistent) {
7408  		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
7409  		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
7410 +		if (!mddev->external)
7411 +			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
7412  		wake_up(&mddev->sb_wait);
7413  		return;
7414  	}
7415 @@ -3107,7 +3116,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
7416  		char nm[20];
7417  		if (rdev->raid_disk < 0)
7418  			continue;
7419 -		if (rdev->new_raid_disk > mddev->raid_disks)
7420 +		if (rdev->new_raid_disk >= mddev->raid_disks)
7421  			rdev->new_raid_disk = -1;
7422  		if (rdev->new_raid_disk == rdev->raid_disk)
7423  			continue;
7424 @@ -4287,9 +4296,6 @@ static int md_alloc(dev_t dev, char *name)
7425  		goto abort;
7426  	mddev->queue->queuedata = mddev;
7427  
7428 -	/* Can be unlocked because the queue is new: no concurrency */
7429 -	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
7430 -
7431  	blk_queue_make_request(mddev->queue, md_make_request);
7432  
7433  	disk = alloc_disk(1 << shift);
7434 @@ -4555,7 +4561,8 @@ int md_run(mddev_t *mddev)
7435  	mddev->safemode_timer.data = (unsigned long) mddev;
7436  	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
7437  	mddev->in_sync = 1;
7438 -
7439 +	smp_wmb();
7440 +	mddev->ready = 1;
7441  	list_for_each_entry(rdev, &mddev->disks, same_set)
7442  		if (rdev->raid_disk >= 0) {
7443  			char nm[20];
7444 @@ -4717,6 +4724,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
7445  
7446  void md_stop(mddev_t *mddev)
7447  {
7448 +	mddev->ready = 0;
7449  	mddev->pers->stop(mddev);
7450  	if (mddev->pers->sync_request && mddev->to_remove == NULL)
7451  		mddev->to_remove = &md_redundancy_group;
7452 @@ -5148,17 +5156,21 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
7453  				PTR_ERR(rdev));
7454  			return PTR_ERR(rdev);
7455  		}
7456 -		/* set save_raid_disk if appropriate */
7457 +		/* set saved_raid_disk if appropriate */
7458  		if (!mddev->persistent) {
7459  			if (info->state & (1<<MD_DISK_SYNC)  &&
7460 -			    info->raid_disk < mddev->raid_disks)
7461 +			    info->raid_disk < mddev->raid_disks) {
7462  				rdev->raid_disk = info->raid_disk;
7463 -			else
7464 +				set_bit(In_sync, &rdev->flags);
7465 +			} else
7466  				rdev->raid_disk = -1;
7467  		} else
7468  			super_types[mddev->major_version].
7469  				validate_super(mddev, rdev);
7470 -		rdev->saved_raid_disk = rdev->raid_disk;
7471 +		if (test_bit(In_sync, &rdev->flags))
7472 +			rdev->saved_raid_disk = rdev->raid_disk;
7473 +		else
7474 +			rdev->saved_raid_disk = -1;
7475  
7476  		clear_bit(In_sync, &rdev->flags); /* just to be sure */
7477  		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
7478 @@ -6036,8 +6048,8 @@ static int md_thread(void * arg)
7479  			 thread->timeout);
7480  
7481  		clear_bit(THREAD_WAKEUP, &thread->flags);
7482 -
7483 -		thread->run(thread->mddev);
7484 +		if (!kthread_should_stop())
7485 +			thread->run(thread->mddev);
7486  	}
7487  
7488  	return 0;
7489 diff --git a/drivers/md/md.h b/drivers/md/md.h
7490 index 3931299..563ede3 100644
7491 --- a/drivers/md/md.h
7492 +++ b/drivers/md/md.h
7493 @@ -149,7 +149,8 @@ struct mddev_s
7494  						       * are happening, so run/
7495  						       * takeover/stop are not safe
7496  						       */
7497 -
7498 +	int				ready; /* See when safe to pass
7499 +						* IO requests down */
7500  	struct gendisk			*gendisk;
7501  
7502  	struct kobject			kobj;
7503 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
7504 index 0b830bb..d8b2d7b 100644
7505 --- a/drivers/md/raid1.c
7506 +++ b/drivers/md/raid1.c
7507 @@ -1210,6 +1210,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
7508  		 * is not possible.
7509  		 */
7510  		if (!test_bit(Faulty, &rdev->flags) &&
7511 +		    !mddev->recovery_disabled &&
7512  		    mddev->degraded < conf->raid_disks) {
7513  			err = -EBUSY;
7514  			goto abort;
7515 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
7516 index 8471838..838c275 100644
7517 --- a/drivers/md/raid10.c
7518 +++ b/drivers/md/raid10.c
7519 @@ -2396,13 +2396,13 @@ static int run(mddev_t *mddev)
7520  	return 0;
7521  
7522  out_free_conf:
7523 +	md_unregister_thread(mddev->thread);
7524  	if (conf->r10bio_pool)
7525  		mempool_destroy(conf->r10bio_pool);
7526  	safe_put_page(conf->tmppage);
7527  	kfree(conf->mirrors);
7528  	kfree(conf);
7529  	mddev->private = NULL;
7530 -	md_unregister_thread(mddev->thread);
7531  out:
7532  	return -EIO;
7533  }
7534 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
7535 index 05bde9c..1d1d8d2 100644
7536 --- a/drivers/media/common/saa7146_hlp.c
7537 +++ b/drivers/media/common/saa7146_hlp.c
7538 @@ -558,7 +558,7 @@ static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, e
7539  static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
7540  {
7541  	struct saa7146_vv *vv = dev->vv_data;
7542 -	struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat);
7543 +	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);
7544  
7545  	int b_depth = vv->ov_fmt->depth;
7546  	int b_bpl = vv->ov_fb.fmt.bytesperline;
7547 @@ -702,7 +702,7 @@ static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa71
7548  	struct saa7146_vv *vv = dev->vv_data;
7549  	struct saa7146_video_dma vdma1;
7550  
7551 -	struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
7552 +	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
7553  
7554  	int width = buf->fmt->width;
7555  	int height = buf->fmt->height;
7556 @@ -827,7 +827,7 @@ static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa71
7557  	struct saa7146_video_dma vdma2;
7558  	struct saa7146_video_dma vdma3;
7559  
7560 -	struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
7561 +	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
7562  
7563  	int width = buf->fmt->width;
7564  	int height = buf->fmt->height;
7565 @@ -994,7 +994,7 @@ static void program_capture_engine(struct saa7146_dev *dev, int planar)
7566  
7567  void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
7568  {
7569 -	struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
7570 +	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
7571  	struct saa7146_vv *vv = dev->vv_data;
7572  	u32 vdma1_prot_addr;
7573  
7574 diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
7575 index a212a91..5f01da1 100644
7576 --- a/drivers/media/common/saa7146_video.c
7577 +++ b/drivers/media/common/saa7146_video.c
7578 @@ -84,7 +84,7 @@ static struct saa7146_format formats[] = {
7579  
7580  static int NUM_FORMATS = sizeof(formats)/sizeof(struct saa7146_format);
7581  
7582 -struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc)
7583 +struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc)
7584  {
7585  	int i, j = NUM_FORMATS;
7586  
7587 @@ -266,7 +266,7 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
7588  	struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
7589  	struct scatterlist *list = dma->sglist;
7590  	int length = dma->sglen;
7591 -	struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
7592 +	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
7593  
7594  	DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length));
7595  
7596 @@ -408,7 +408,7 @@ static int video_begin(struct saa7146_fh *fh)
7597  		}
7598  	}
7599  
7600 -	fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
7601 +	fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
7602  	/* we need to have a valid format set here */
7603  	BUG_ON(NULL == fmt);
7604  
7605 @@ -460,7 +460,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
7606  		return -EBUSY;
7607  	}
7608  
7609 -	fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
7610 +	fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
7611  	/* we need to have a valid format set here */
7612  	BUG_ON(NULL == fmt);
7613  
7614 @@ -536,7 +536,7 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
7615  		return -EPERM;
7616  
7617  	/* check args */
7618 -	fmt = format_by_fourcc(dev, fb->fmt.pixelformat);
7619 +	fmt = saa7146_format_by_fourcc(dev, fb->fmt.pixelformat);
7620  	if (NULL == fmt)
7621  		return -EINVAL;
7622  
7623 @@ -760,7 +760,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma
7624  
7625  	DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh));
7626  
7627 -	fmt = format_by_fourcc(dev, f->fmt.pix.pixelformat);
7628 +	fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat);
7629  	if (NULL == fmt)
7630  		return -EINVAL;
7631  
7632 @@ -1264,7 +1264,7 @@ static int buffer_prepare(struct videobuf_queue *q,
7633  		buf->fmt       = &fh->video_fmt;
7634  		buf->vb.field  = fh->video_fmt.field;
7635  
7636 -		sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
7637 +		sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
7638  
7639  		release_all_pagetables(dev, buf);
7640  		if( 0 != IS_PLANAR(sfmt->trans)) {
7641 @@ -1378,7 +1378,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
7642  	fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24;
7643  	fh->video_fmt.bytesperline = 0;
7644  	fh->video_fmt.field = V4L2_FIELD_ANY;
7645 -	sfmt = format_by_fourcc(dev,fh->video_fmt.pixelformat);
7646 +	sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
7647  	fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8;
7648  
7649  	videobuf_queue_sg_init(&fh->video_q, &video_qops,
7650 diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
7651 index 5bf4985..1944814 100644
7652 --- a/drivers/media/radio/radio-aimslab.c
7653 +++ b/drivers/media/radio/radio-aimslab.c
7654 @@ -31,7 +31,6 @@
7655  #include <linux/module.h>	/* Modules 			*/
7656  #include <linux/init.h>		/* Initdata			*/
7657  #include <linux/ioport.h>	/* request_region		*/
7658 -#include <linux/delay.h>	/* udelay			*/
7659  #include <linux/videodev2.h>	/* kernel radio structs		*/
7660  #include <linux/version.h>	/* for KERNEL_VERSION MACRO	*/
7661  #include <linux/io.h>		/* outb, outb_p			*/
7662 @@ -71,27 +70,17 @@ static struct rtrack rtrack_card;
7663  
7664  /* local things */
7665  
7666 -static void sleep_delay(long n)
7667 -{
7668 -	/* Sleep nicely for 'n' uS */
7669 -	int d = n / msecs_to_jiffies(1000);
7670 -	if (!d)
7671 -		udelay(n);
7672 -	else
7673 -		msleep(jiffies_to_msecs(d));
7674 -}
7675 -
7676  static void rt_decvol(struct rtrack *rt)
7677  {
7678  	outb(0x58, rt->io);		/* volume down + sigstr + on	*/
7679 -	sleep_delay(100000);
7680 +	msleep(100);
7681  	outb(0xd8, rt->io);		/* volume steady + sigstr + on	*/
7682  }
7683  
7684  static void rt_incvol(struct rtrack *rt)
7685  {
7686  	outb(0x98, rt->io);		/* volume up + sigstr + on	*/
7687 -	sleep_delay(100000);
7688 +	msleep(100);
7689  	outb(0xd8, rt->io);		/* volume steady + sigstr + on	*/
7690  }
7691  
7692 @@ -120,7 +109,7 @@ static int rt_setvol(struct rtrack *rt, int vol)
7693  
7694  	if (vol == 0) {			/* volume = 0 means mute the card */
7695  		outb(0x48, rt->io);	/* volume down but still "on"	*/
7696 -		sleep_delay(2000000);	/* make sure it's totally down	*/
7697 +		msleep(2000);	/* make sure it's totally down	*/
7698  		outb(0xd0, rt->io);	/* volume steady, off		*/
7699  		rt->curvol = 0;		/* track the volume state!	*/
7700  		mutex_unlock(&rt->lock);
7701 @@ -155,7 +144,7 @@ static void send_0_byte(struct rtrack *rt)
7702  		outb_p(128+64+16+8+  1, rt->io);  /* on + wr-enable + data low */
7703  		outb_p(128+64+16+8+2+1, rt->io);  /* clock */
7704  	}
7705 -	sleep_delay(1000);
7706 +	msleep(1);
7707  }
7708  
7709  static void send_1_byte(struct rtrack *rt)
7710 @@ -169,7 +158,7 @@ static void send_1_byte(struct rtrack *rt)
7711  		outb_p(128+64+16+8+4+2+1, rt->io); /* clock */
7712  	}
7713  
7714 -	sleep_delay(1000);
7715 +	msleep(1);
7716  }
7717  
7718  static int rt_setfreq(struct rtrack *rt, unsigned long freq)
7719 @@ -427,7 +416,7 @@ static int __init rtrack_init(void)
7720  
7721  	/* this ensures that the volume is all the way down  */
7722  	outb(0x48, rt->io);		/* volume down but still "on"	*/
7723 -	sleep_delay(2000000);	/* make sure it's totally down	*/
7724 +	msleep(2000);	/* make sure it's totally down	*/
7725  	outb(0xc0, rt->io);		/* steady volume, mute card	*/
7726  
7727  	return 0;
7728 diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
7729 index f6b62e7..11c987e 100644
7730 --- a/drivers/media/video/cx23885/cx23885-core.c
7731 +++ b/drivers/media/video/cx23885/cx23885-core.c
7732 @@ -815,6 +815,7 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
7733  	case 0x0e:
7734  		/* CX23887-15Z */
7735  		dev->hwrevision = 0xc0;
7736 +		break;
7737  	case 0x0f:
7738  		/* CX23887-14Z */
7739  		dev->hwrevision = 0xb1;
7740 diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
7741 index e7efb4b..6e80376 100644
7742 --- a/drivers/media/video/em28xx/em28xx-cards.c
7743 +++ b/drivers/media/video/em28xx/em28xx-cards.c
7744 @@ -1621,11 +1621,11 @@ struct em28xx_board em28xx_boards[] = {
7745  		.input           = { {
7746  			.type     = EM28XX_VMUX_COMPOSITE1,
7747  			.vmux     = SAA7115_COMPOSITE0,
7748 -			.amux     = EM28XX_AMUX_VIDEO2,
7749 +			.amux     = EM28XX_AMUX_LINE_IN,
7750  		}, {
7751  			.type     = EM28XX_VMUX_SVIDEO,
7752  			.vmux     = SAA7115_SVIDEO3,
7753 -			.amux     = EM28XX_AMUX_VIDEO2,
7754 +			.amux     = EM28XX_AMUX_LINE_IN,
7755  		} },
7756  	},
7757  	[EM2860_BOARD_TERRATEC_AV350] = {
7758 diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
7759 index 78abc1c..a50bf65 100644
7760 --- a/drivers/media/video/gspca/gspca.c
7761 +++ b/drivers/media/video/gspca/gspca.c
7762 @@ -652,7 +652,7 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
7763  				   : USB_ENDPOINT_XFER_ISOC;
7764  	i = gspca_dev->alt;			/* previous alt setting */
7765  	if (gspca_dev->cam.reverse_alts) {
7766 -		if (gspca_dev->audio)
7767 +		if (gspca_dev->audio && i < gspca_dev->nbalt - 2)
7768  			i++;
7769  		while (++i < gspca_dev->nbalt) {
7770  			ep = alt_xfer(&intf->altsetting[i], xfer);
7771 @@ -660,7 +660,7 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
7772  				break;
7773  		}
7774  	} else {
7775 -		if (gspca_dev->audio)
7776 +		if (gspca_dev->audio && i > 1)
7777  			i--;
7778  		while (--i >= 0) {
7779  			ep = alt_xfer(&intf->altsetting[i], xfer);
7780 diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
7781 index 3705443..248c2e6 100644
7782 --- a/drivers/media/video/gspca/sonixj.c
7783 +++ b/drivers/media/video/gspca/sonixj.c
7784 @@ -56,6 +56,7 @@ struct sd {
7785  	u8 jpegqual;			/* webcam quality */
7786  
7787  	u8 reg18;
7788 +	u8 flags;
7789  
7790  	s8 ag_cnt;
7791  #define AG_CNT_START 13
7792 @@ -87,6 +88,9 @@ enum sensors {
7793  	SENSOR_SP80708,
7794  };
7795  
7796 +/* device flags */
7797 +#define PDN_INV	1		/* inverse pin S_PWR_DN / sn_xxx tables */
7798 +
7799  /* V4L2 controls supported by the driver */
7800  static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
7801  static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
7802 @@ -1777,7 +1781,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
7803  	struct cam *cam;
7804  
7805  	sd->bridge = id->driver_info >> 16;
7806 -	sd->sensor = id->driver_info;
7807 +	sd->sensor = id->driver_info >> 8;
7808 +	sd->flags = id->driver_info;
7809  
7810  	cam = &gspca_dev->cam;
7811  	if (sd->sensor == SENSOR_ADCM1700) {
7812 @@ -2474,8 +2479,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
7813  		reg1 = 0x44;
7814  		reg17 = 0xa2;
7815  		break;
7816 -	default:
7817 -/*	case SENSOR_SP80708: */
7818 +	case SENSOR_SP80708:
7819  		init = sp80708_sensor_param1;
7820  		if (mode) {
7821  /*??			reg1 = 0x04;	 * 320 clk 48Mhz */
7822 @@ -2985,14 +2989,18 @@ static const struct sd_desc sd_desc = {
7823  /* -- module initialisation -- */
7824  #define BS(bridge, sensor) \
7825  	.driver_info = (BRIDGE_ ## bridge << 16) \
7826 -			| SENSOR_ ## sensor
7827 +			| (SENSOR_ ## sensor << 8)
7828 +#define BSF(bridge, sensor, flags) \
7829 +	.driver_info = (BRIDGE_ ## bridge << 16) \
7830 +			| (SENSOR_ ## sensor << 8) \
7831 +			| (flags)
7832  static const __devinitdata struct usb_device_id device_table[] = {
7833  #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
7834  	{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
7835  	{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
7836  #endif
7837 -	{USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)},
7838 -	{USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)},
7839 +	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
7840 +	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
7841  	{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
7842  	{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
7843  	{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
7844 diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
7845 index 4863a21..93f7959 100644
7846 --- a/drivers/media/video/hdpvr/hdpvr-video.c
7847 +++ b/drivers/media/video/hdpvr/hdpvr-video.c
7848 @@ -157,6 +157,7 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
7849  				  mem, dev->bulk_in_size,
7850  				  hdpvr_read_bulk_callback, buf);
7851  
7852 +		buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
7853  		buf->status = BUFSTAT_AVAILABLE;
7854  		list_add_tail(&buf->buff_list, &dev->free_buff_list);
7855  	}
7856 diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
7857 index 0e41213..4897d90f 100644
7858 --- a/drivers/media/video/msp3400-driver.c
7859 +++ b/drivers/media/video/msp3400-driver.c
7860 @@ -382,7 +382,12 @@ static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
7861  
7862  void msp_update_volume(struct msp_state *state)
7863  {
7864 -	v4l2_ctrl_s_ctrl(state->volume, v4l2_ctrl_g_ctrl(state->volume));
7865 +	/* Force an update of the volume/mute cluster */
7866 +	v4l2_ctrl_lock(state->volume);
7867 +	state->volume->val = state->volume->cur.val;
7868 +	state->muted->val = state->muted->cur.val;
7869 +	msp_s_ctrl(state->volume);
7870 +	v4l2_ctrl_unlock(state->volume);
7871  }
7872  
7873  /* --- v4l2 ioctls --- */
7874 diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
7875 index b6ea672..15cb5b3 100644
7876 --- a/drivers/media/video/mx2_camera.c
7877 +++ b/drivers/media/video/mx2_camera.c
7878 @@ -791,8 +791,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
7879  
7880  	if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
7881  		csicr1 |= CSICR1_REDGE;
7882 -	if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
7883 -		csicr1 |= CSICR1_INV_PCLK;
7884  	if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
7885  		csicr1 |= CSICR1_SOF_POL;
7886  	if (common_flags & SOCAM_HSYNC_ACTIVE_HIGH)
7887 diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
7888 index bb8d83d..7c74751 100644
7889 --- a/drivers/media/video/saa7134/saa7134-cards.c
7890 +++ b/drivers/media/video/saa7134/saa7134-cards.c
7891 @@ -6661,6 +6661,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
7892  		.subdevice    = 0x2804,
7893  		.driver_data  = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000,
7894  	}, {
7895 +		.vendor       = PCI_VENDOR_ID_PHILIPS,
7896 +		.device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
7897 +		.subvendor    = 0x5ace, /* Beholder Intl. Ltd. */
7898 +		.subdevice    = 0x7190,
7899 +		.driver_data  = SAA7134_BOARD_BEHOLD_H7,
7900 +	}, {
7901 +		.vendor       = PCI_VENDOR_ID_PHILIPS,
7902 +		.device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
7903 +		.subvendor    = 0x5ace, /* Beholder Intl. Ltd. */
7904 +		.subdevice    = 0x7090,
7905 +		.driver_data  = SAA7134_BOARD_BEHOLD_A7,
7906 +	}, {
7907  		/* --- boards without eeprom + subsystem ID --- */
7908  		.vendor       = PCI_VENDOR_ID_PHILIPS,
7909  		.device       = PCI_DEVICE_ID_PHILIPS_SAA7134,
7910 @@ -6698,18 +6710,6 @@ struct pci_device_id saa7134_pci_tbl[] = {
7911  		.subvendor    = PCI_ANY_ID,
7912  		.subdevice    = PCI_ANY_ID,
7913  		.driver_data  = SAA7134_BOARD_UNKNOWN,
7914 -	}, {
7915 -		.vendor       = PCI_VENDOR_ID_PHILIPS,
7916 -		.device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
7917 -		.subvendor    = 0x5ace, /* Beholder Intl. Ltd. */
7918 -		.subdevice    = 0x7190,
7919 -		.driver_data  = SAA7134_BOARD_BEHOLD_H7,
7920 -	}, {
7921 -		.vendor       = PCI_VENDOR_ID_PHILIPS,
7922 -		.device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
7923 -		.subvendor    = 0x5ace, /* Beholder Intl. Ltd. */
7924 -		.subdevice    = 0x7090,
7925 -		.driver_data  = SAA7134_BOARD_BEHOLD_A7,
7926  	},{
7927  		/* --- end of list --- */
7928  	}
7929 diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
7930 index 1e7aaaf..b534716 100644
7931 --- a/drivers/mfd/wm831x-core.c
7932 +++ b/drivers/mfd/wm831x-core.c
7933 @@ -1464,7 +1464,11 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
7934  		dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret);
7935  		goto err;
7936  	}
7937 -	if (ret != 0x6204) {
7938 +	switch (ret) {
7939 +	case 0x6204:
7940 +	case 0x6246:
7941 +		break;
7942 +	default:
7943  		dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret);
7944  		ret = -EINVAL;
7945  		goto err;
7946 @@ -1617,7 +1621,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
7947  	case WM8321:
7948  		ret = mfd_add_devices(wm831x->dev, -1,
7949  				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
7950 -				      NULL, 0);
7951 +				      NULL, wm831x->irq_base);
7952  		break;
7953  
7954  	default:
7955 diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
7956 index b8c6df9..6cfcb63 100644
7957 --- a/drivers/misc/ad525x_dpot-spi.c
7958 +++ b/drivers/misc/ad525x_dpot-spi.c
7959 @@ -53,13 +53,13 @@ static int write8(void *client, u8 val)
7960  static int write16(void *client, u8 reg, u8 val)
7961  {
7962  	u8 data[2] = {reg, val};
7963 -	return spi_write(client, data, 1);
7964 +	return spi_write(client, data, 2);
7965  }
7966  
7967  static int write24(void *client, u8 reg, u16 val)
7968  {
7969  	u8 data[3] = {reg, val >> 8, val};
7970 -	return spi_write(client, data, 1);
7971 +	return spi_write(client, data, 3);
7972  }
7973  
7974  static int read8(void *client)
7975 diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
7976 index d551f09..6956f7e 100644
7977 --- a/drivers/misc/sgi-xp/xpc_partition.c
7978 +++ b/drivers/misc/sgi-xp/xpc_partition.c
7979 @@ -439,18 +439,23 @@ xpc_discovery(void)
7980  	 * nodes that can comprise an access protection grouping. The access
7981  	 * protection is in regards to memory, IOI and IPI.
7982  	 */
7983 -	max_regions = 64;
7984  	region_size = xp_region_size;
7985  
7986 -	switch (region_size) {
7987 -	case 128:
7988 -		max_regions *= 2;
7989 -	case 64:
7990 -		max_regions *= 2;
7991 -	case 32:
7992 -		max_regions *= 2;
7993 -		region_size = 16;
7994 -		DBUG_ON(!is_shub2());
7995 +	if (is_uv())
7996 +		max_regions = 256;
7997 +	else {
7998 +		max_regions = 64;
7999 +
8000 +		switch (region_size) {
8001 +		case 128:
8002 +			max_regions *= 2;
8003 +		case 64:
8004 +			max_regions *= 2;
8005 +		case 32:
8006 +			max_regions *= 2;
8007 +			region_size = 16;
8008 +			DBUG_ON(!is_shub2());
8009 +		}
8010  	}
8011  
8012  	for (region = 0; region < max_regions; region++) {
8013 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
8014 index 1f59ee2..17bbacb 100644
8015 --- a/drivers/misc/sgi-xp/xpc_uv.c
8016 +++ b/drivers/misc/sgi-xp/xpc_uv.c
8017 @@ -417,6 +417,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
8018  static void
8019  xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
8020  			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
8021 +			      int part_setup,
8022  			      int *wakeup_hb_checker)
8023  {
8024  	unsigned long irq_flags;
8025 @@ -481,6 +482,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
8026  	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
8027  		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
8028  
8029 +		if (!part_setup)
8030 +			break;
8031 +
8032  		msg = container_of(msg_hdr, struct
8033  				   xpc_activate_mq_msg_chctl_closerequest_uv,
8034  				   hdr);
8035 @@ -497,6 +501,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
8036  	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
8037  		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
8038  
8039 +		if (!part_setup)
8040 +			break;
8041 +
8042  		msg = container_of(msg_hdr, struct
8043  				   xpc_activate_mq_msg_chctl_closereply_uv,
8044  				   hdr);
8045 @@ -511,6 +518,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
8046  	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
8047  		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
8048  
8049 +		if (!part_setup)
8050 +			break;
8051 +
8052  		msg = container_of(msg_hdr, struct
8053  				   xpc_activate_mq_msg_chctl_openrequest_uv,
8054  				   hdr);
8055 @@ -528,6 +538,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
8056  	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
8057  		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
8058  
8059 +		if (!part_setup)
8060 +			break;
8061 +
8062  		msg = container_of(msg_hdr, struct
8063  				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
8064  		args = &part->remote_openclose_args[msg->ch_number];
8065 @@ -545,6 +558,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
8066  	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
8067  		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
8068  
8069 +		if (!part_setup)
8070 +			break;
8071 +
8072  		msg = container_of(msg_hdr, struct
8073  				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
8074  		spin_lock_irqsave(&part->chctl_lock, irq_flags);
8075 @@ -621,6 +637,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
8076  
8077  			part_referenced = xpc_part_ref(part);
8078  			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
8079 +						      part_referenced,
8080  						      &wakeup_hb_checker);
8081  			if (part_referenced)
8082  				xpc_part_deref(part);
8083 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
8084 index 09eee6d..9944542 100644
8085 --- a/drivers/mmc/core/core.c
8086 +++ b/drivers/mmc/core/core.c
8087 @@ -1514,7 +1514,7 @@ void mmc_stop_host(struct mmc_host *host)
8088  
8089  	if (host->caps & MMC_CAP_DISABLE)
8090  		cancel_delayed_work(&host->disable);
8091 -	cancel_delayed_work(&host->detect);
8092 +	cancel_delayed_work_sync(&host->detect);
8093  	mmc_flush_scheduled_work();
8094  
8095  	/* clear pm flags now and let card drivers set them as needed */
8096 @@ -1720,6 +1720,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
8097  
8098  	case PM_POST_SUSPEND:
8099  	case PM_POST_HIBERNATION:
8100 +	case PM_POST_RESTORE:
8101  
8102  		spin_lock_irqsave(&host->lock, flags);
8103  		host->rescan_disable = 0;
8104 diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
8105 index 87226cd..57bb421 100644
8106 --- a/drivers/mmc/host/at91_mci.c
8107 +++ b/drivers/mmc/host/at91_mci.c
8108 @@ -69,6 +69,7 @@
8109  #include <linux/highmem.h>
8110  
8111  #include <linux/mmc/host.h>
8112 +#include <linux/mmc/sdio.h>
8113  
8114  #include <asm/io.h>
8115  #include <asm/irq.h>
8116 @@ -493,10 +494,14 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
8117  		else if (data->flags & MMC_DATA_WRITE)
8118  			cmdr |= AT91_MCI_TRCMD_START;
8119  
8120 -		if (data->flags & MMC_DATA_STREAM)
8121 -			cmdr |= AT91_MCI_TRTYP_STREAM;
8122 -		if (data->blocks > 1)
8123 -			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
8124 +		if (cmd->opcode == SD_IO_RW_EXTENDED) {
8125 +			cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
8126 +		} else {
8127 +			if (data->flags & MMC_DATA_STREAM)
8128 +				cmdr |= AT91_MCI_TRTYP_STREAM;
8129 +			if (data->blocks > 1)
8130 +				cmdr |= AT91_MCI_TRTYP_MULTIPLE;
8131 +		}
8132  	}
8133  	else {
8134  		block_length = 0;
8135 diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
8136 index 95ef864..3a569bf 100644
8137 --- a/drivers/mmc/host/atmel-mci.c
8138 +++ b/drivers/mmc/host/atmel-mci.c
8139 @@ -26,6 +26,7 @@
8140  #include <linux/stat.h>
8141  
8142  #include <linux/mmc/host.h>
8143 +#include <linux/mmc/sdio.h>
8144  
8145  #include <mach/atmel-mci.h>
8146  #include <linux/atmel-mci.h>
8147 @@ -532,12 +533,17 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
8148  	data = cmd->data;
8149  	if (data) {
8150  		cmdr |= MCI_CMDR_START_XFER;
8151 -		if (data->flags & MMC_DATA_STREAM)
8152 -			cmdr |= MCI_CMDR_STREAM;
8153 -		else if (data->blocks > 1)
8154 -			cmdr |= MCI_CMDR_MULTI_BLOCK;
8155 -		else
8156 -			cmdr |= MCI_CMDR_BLOCK;
8157 +
8158 +		if (cmd->opcode == SD_IO_RW_EXTENDED) {
8159 +			cmdr |= MCI_CMDR_SDIO_BLOCK;
8160 +		} else {
8161 +			if (data->flags & MMC_DATA_STREAM)
8162 +				cmdr |= MCI_CMDR_STREAM;
8163 +			else if (data->blocks > 1)
8164 +				cmdr |= MCI_CMDR_MULTI_BLOCK;
8165 +			else
8166 +				cmdr |= MCI_CMDR_BLOCK;
8167 +		}
8168  
8169  		if (data->flags & MMC_DATA_READ)
8170  			cmdr |= MCI_CMDR_TRDIR_READ;
8171 diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
8172 index 4b0e677..5164edf 100644
8173 --- a/drivers/mmc/host/bfin_sdh.c
8174 +++ b/drivers/mmc/host/bfin_sdh.c
8175 @@ -462,7 +462,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
8176  		goto out;
8177  	}
8178  
8179 -	mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
8180 +	mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
8181  	if (!mmc) {
8182  		ret = -ENOMEM;
8183  		goto out;
8184 diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
8185 index 4a4f6b8..70609ee 100644
8186 --- a/drivers/net/8139cp.c
8187 +++ b/drivers/net/8139cp.c
8188 @@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
8189  {
8190  	unsigned int protocol = (status >> 16) & 0x3;
8191  
8192 -	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
8193 +	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
8194 +	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
8195  		return 1;
8196 -	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
8197 -		return 1;
8198 -	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
8199 -		return 1;
8200 -	return 0;
8201 +	else
8202 +		return 0;
8203  }
8204  
8205  static int cp_rx_poll(struct napi_struct *napi, int budget)
8206 diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
8207 index c73be28..67f75c1 100644
8208 --- a/drivers/net/atlx/atl1.c
8209 +++ b/drivers/net/atlx/atl1.c
8210 @@ -3503,6 +3503,8 @@ static int atl1_set_ringparam(struct net_device *netdev,
8211  	struct atl1_rfd_ring rfd_old, rfd_new;
8212  	struct atl1_rrd_ring rrd_old, rrd_new;
8213  	struct atl1_ring_header rhdr_old, rhdr_new;
8214 +	struct atl1_smb smb;
8215 +	struct atl1_cmb cmb;
8216  	int err;
8217  
8218  	tpd_old = adapter->tpd_ring;
8219 @@ -3543,11 +3545,19 @@ static int atl1_set_ringparam(struct net_device *netdev,
8220  		adapter->rrd_ring = rrd_old;
8221  		adapter->tpd_ring = tpd_old;
8222  		adapter->ring_header = rhdr_old;
8223 +		/*
8224 +		 * Save SMB and CMB, since atl1_free_ring_resources
8225 +		 * will clear them.
8226 +		 */
8227 +		smb = adapter->smb;
8228 +		cmb = adapter->cmb;
8229  		atl1_free_ring_resources(adapter);
8230  		adapter->rfd_ring = rfd_new;
8231  		adapter->rrd_ring = rrd_new;
8232  		adapter->tpd_ring = tpd_new;
8233  		adapter->ring_header = rhdr_new;
8234 +		adapter->smb = smb;
8235 +		adapter->cmb = cmb;
8236  
8237  		err = atl1_up(adapter);
8238  		if (err)
8239 diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
8240 index 34abcc9..dc913b9 100644
8241 --- a/drivers/net/benet/be_cmds.c
8242 +++ b/drivers/net/benet/be_cmds.c
8243 @@ -1179,7 +1179,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
8244  
8245  		i = 0;
8246  		netdev_for_each_mc_addr(ha, netdev)
8247 -			memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
8248 +			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
8249  	} else {
8250  		req->promiscuous = 1;
8251  	}
8252 diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
8253 index c6fdd85..c867cf9 100644
8254 --- a/drivers/net/bonding/bonding.h
8255 +++ b/drivers/net/bonding/bonding.h
8256 @@ -240,11 +240,11 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
8257  
8258  	bond_for_each_slave(bond, slave, i) {
8259  		if (slave->dev == slave_dev) {
8260 -			break;
8261 +			return slave;
8262  		}
8263  	}
8264  
8265 -	return slave;
8266 +	return 0;
8267  }
8268  
8269  static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
8270 diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
8271 index 5cc39ed..7132428 100644
8272 --- a/drivers/net/e1000/e1000_main.c
8273 +++ b/drivers/net/e1000/e1000_main.c
8274 @@ -31,7 +31,7 @@
8275  
8276  char e1000_driver_name[] = "e1000";
8277  static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
8278 -#define DRV_VERSION "7.3.21-k6-NAPI"
8279 +#define DRV_VERSION "7.3.21-k8-NAPI"
8280  const char e1000_driver_version[] = DRV_VERSION;
8281  static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
8282  
8283 @@ -483,9 +483,6 @@ void e1000_down(struct e1000_adapter *adapter)
8284  	struct net_device *netdev = adapter->netdev;
8285  	u32 rctl, tctl;
8286  
8287 -	/* signal that we're down so the interrupt handler does not
8288 -	 * reschedule our watchdog timer */
8289 -	set_bit(__E1000_DOWN, &adapter->flags);
8290  
8291  	/* disable receives in the hardware */
8292  	rctl = er32(RCTL);
8293 @@ -506,6 +503,13 @@ void e1000_down(struct e1000_adapter *adapter)
8294  
8295  	e1000_irq_disable(adapter);
8296  
8297 +	/*
8298 +	 * Setting DOWN must be after irq_disable to prevent
8299 +	 * a screaming interrupt.  Setting DOWN also prevents
8300 +	 * timers and tasks from rescheduling.
8301 +	 */
8302 +	set_bit(__E1000_DOWN, &adapter->flags);
8303 +
8304  	del_timer_sync(&adapter->tx_fifo_stall_timer);
8305  	del_timer_sync(&adapter->watchdog_timer);
8306  	del_timer_sync(&adapter->phy_info_timer);
8307 diff --git a/drivers/net/fec.c b/drivers/net/fec.c
8308 index cce32d4..52e9ca8 100644
8309 --- a/drivers/net/fec.c
8310 +++ b/drivers/net/fec.c
8311 @@ -651,8 +651,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
8312  	fep->mii_timeout = 0;
8313  	init_completion(&fep->mdio_done);
8314  
8315 -	/* start a read op */
8316 -	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
8317 +	/* start a write op */
8318 +	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
8319  		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
8320  		FEC_MMFR_TA | FEC_MMFR_DATA(value),
8321  		fep->hwp + FEC_MII_DATA);
8322 diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
8323 index ab9f675..fe337bd 100644
8324 --- a/drivers/net/ifb.c
8325 +++ b/drivers/net/ifb.c
8326 @@ -104,6 +104,8 @@ static void ri_tasklet(unsigned long dev)
8327  			rcu_read_unlock();
8328  			dev_kfree_skb(skb);
8329  			stats->tx_dropped++;
8330 +			if (skb_queue_len(&dp->tq) != 0)
8331 +				goto resched;
8332  			break;
8333  		}
8334  		rcu_read_unlock();
8335 diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
8336 index debeee2..510c726 100644
8337 --- a/drivers/net/igbvf/igbvf.h
8338 +++ b/drivers/net/igbvf/igbvf.h
8339 @@ -126,7 +126,7 @@ struct igbvf_buffer {
8340  			unsigned int page_offset;
8341  		};
8342  	};
8343 -	struct page *page;
8344 +	//struct page *page;
8345  };
8346  
8347  union igbvf_desc {
8348 diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
8349 index e32af43..0e7f086 100644
8350 --- a/drivers/net/ixgbe/ixgbe_main.c
8351 +++ b/drivers/net/ixgbe/ixgbe_main.c
8352 @@ -2651,9 +2651,16 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
8353  	int rx_buf_len;
8354  
8355  	/* Decide whether to use packet split mode or not */
8356 +	/* On by default */
8357 +	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
8358 +
8359  	/* Do not use packet split if we're in SR-IOV Mode */
8360 -	if (!adapter->num_vfs)
8361 -		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
8362 +	if (adapter->num_vfs)
8363 +		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
8364 +
8365 +	/* Disable packet split due to 82599 erratum #45 */
8366 +	if (hw->mac.type == ixgbe_mac_82599EB)
8367 +		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
8368  
8369  	/* Set the RX buffer length according to the mode */
8370  	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
8371 diff --git a/drivers/net/jme.c b/drivers/net/jme.c
8372 index 99f24f5..f0643ac 100644
8373 --- a/drivers/net/jme.c
8374 +++ b/drivers/net/jme.c
8375 @@ -1575,6 +1575,16 @@ jme_free_irq(struct jme_adapter *jme)
8376  	}
8377  }
8378  
8379 +static inline void
8380 +jme_phy_on(struct jme_adapter *jme)
8381 +{
8382 +	u32 bmcr;
8383 +
8384 +	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
8385 +	bmcr &= ~BMCR_PDOWN;
8386 +	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
8387 +}
8388 +
8389  static int
8390  jme_open(struct net_device *netdev)
8391  {
8392 @@ -1595,10 +1605,12 @@ jme_open(struct net_device *netdev)
8393  
8394  	jme_start_irq(jme);
8395  
8396 -	if (test_bit(JME_FLAG_SSET, &jme->flags))
8397 +	if (test_bit(JME_FLAG_SSET, &jme->flags)) {
8398 +		jme_phy_on(jme);
8399  		jme_set_settings(netdev, &jme->old_ecmd);
8400 -	else
8401 +	} else {
8402  		jme_reset_phy_processor(jme);
8403 +	}
8404  
8405  	jme_reset_link(jme);
8406  
8407 @@ -3006,10 +3018,12 @@ jme_resume(struct pci_dev *pdev)
8408  	jme_clear_pm(jme);
8409  	pci_restore_state(pdev);
8410  
8411 -	if (test_bit(JME_FLAG_SSET, &jme->flags))
8412 +	if (test_bit(JME_FLAG_SSET, &jme->flags)) {
8413 +		jme_phy_on(jme);
8414  		jme_set_settings(netdev, &jme->old_ecmd);
8415 -	else
8416 +	} else {
8417  		jme_reset_phy_processor(jme);
8418 +	}
8419  
8420  	jme_start_irq(jme);
8421  	netif_device_attach(netdev);
8422 diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
8423 index f9b509a..b12553b 100644
8424 --- a/drivers/net/pcmcia/pcnet_cs.c
8425 +++ b/drivers/net/pcmcia/pcnet_cs.c
8426 @@ -1622,6 +1622,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
8427  	PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
8428  	PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
8429  	PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
8430 +	PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
8431  	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
8432  	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
8433  	PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
8434 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
8435 index 0101f2b..c1ba49b 100644
8436 --- a/drivers/net/phy/marvell.c
8437 +++ b/drivers/net/phy/marvell.c
8438 @@ -196,20 +196,27 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
8439  			MII_88E1121_PHY_MSCR_PAGE);
8440  	if (err < 0)
8441  		return err;
8442 -	mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
8443 -		MII_88E1121_PHY_MSCR_DELAY_MASK;
8444  
8445 -	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
8446 -		mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
8447 -			 MII_88E1121_PHY_MSCR_TX_DELAY);
8448 -	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
8449 -		mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
8450 -	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
8451 -		mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
8452 +	if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
8453 +	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
8454 +	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
8455 +	    (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
8456  
8457 -	err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
8458 -	if (err < 0)
8459 -		return err;
8460 +		mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
8461 +			MII_88E1121_PHY_MSCR_DELAY_MASK;
8462 +
8463 +		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
8464 +			mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
8465 +				 MII_88E1121_PHY_MSCR_TX_DELAY);
8466 +		else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
8467 +			mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
8468 +		else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
8469 +			mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
8470 +
8471 +		err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
8472 +		if (err < 0)
8473 +			return err;
8474 +	}
8475  
8476  	phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
8477  
8478 diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
8479 index c07de35..6a3eae2 100644
8480 --- a/drivers/net/pppoe.c
8481 +++ b/drivers/net/pppoe.c
8482 @@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
8483  
8484  abort:
8485  	kfree_skb(skb);
8486 -	return 0;
8487 +	return 1;
8488  }
8489  
8490  /************************************************************************
8491 diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
8492 index 142c381..80666f0 100644
8493 --- a/drivers/net/r6040.c
8494 +++ b/drivers/net/r6040.c
8495 @@ -893,16 +893,18 @@ static void r6040_multicast_list(struct net_device *dev)
8496  	/* Multicast Address 1~4 case */
8497  	i = 0;
8498  	netdev_for_each_mc_addr(ha, dev) {
8499 -		if (i < MCAST_MAX) {
8500 -			adrp = (u16 *) ha->addr;
8501 -			iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
8502 -			iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
8503 -			iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
8504 -		} else {
8505 -			iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
8506 -			iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
8507 -			iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
8508 -		}
8509 +		if (i >= MCAST_MAX)
8510 +			break;
8511 +		adrp = (u16 *) ha->addr;
8512 +		iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
8513 +		iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
8514 +		iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
8515 +		i++;
8516 +	}
8517 +	while (i < MCAST_MAX) {
8518 +		iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
8519 +		iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
8520 +		iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
8521  		i++;
8522  	}
8523  }
8524 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
8525 index 992db2f..ed65e4a 100644
8526 --- a/drivers/net/r8169.c
8527 +++ b/drivers/net/r8169.c
8528 @@ -855,10 +855,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8529  	else
8530  		tp->features &= ~RTL_FEATURE_WOL;
8531  	__rtl8169_set_wol(tp, wol->wolopts);
8532 -	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
8533 -
8534  	spin_unlock_irq(&tp->lock);
8535  
8536 +	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
8537 +
8538  	return 0;
8539  }
8540  
8541 @@ -2936,7 +2936,7 @@ static const struct rtl_cfg_info {
8542  		.hw_start	= rtl_hw_start_8168,
8543  		.region		= 2,
8544  		.align		= 8,
8545 -		.intr_event	= SYSErr | RxFIFOOver | LinkChg | RxOverflow |
8546 +		.intr_event	= SYSErr | LinkChg | RxOverflow |
8547  				  TxErr | TxOK | RxOK | RxErr,
8548  		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
8549  		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
8550 @@ -4455,14 +4455,12 @@ static inline int rtl8169_fragmented_frame(u32 status)
8551  	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
8552  }
8553  
8554 -static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
8555 +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
8556  {
8557 -	u32 opts1 = le32_to_cpu(desc->opts1);
8558  	u32 status = opts1 & RxProtoMask;
8559  
8560  	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
8561 -	    ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
8562 -	    ((status == RxProtoIP) && !(opts1 & IPFail)))
8563 +	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
8564  		skb->ip_summed = CHECKSUM_UNNECESSARY;
8565  	else
8566  		skb->ip_summed = CHECKSUM_NONE;
8567 @@ -4551,8 +4549,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
8568  				continue;
8569  			}
8570  
8571 -			rtl8169_rx_csum(skb, desc);
8572 -
8573  			if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
8574  				dma_sync_single_for_device(&pdev->dev, addr,
8575  					pkt_size, PCI_DMA_FROMDEVICE);
8576 @@ -4563,6 +4559,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
8577  				tp->Rx_skbuff[entry] = NULL;
8578  			}
8579  
8580 +			rtl8169_rx_csum(skb, status);
8581  			skb_put(skb, pkt_size);
8582  			skb->protocol = eth_type_trans(skb, dev);
8583  
8584 @@ -4630,7 +4627,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
8585  		}
8586  
8587  		/* Work around for rx fifo overflow */
8588 -		if (unlikely(status & RxFIFOOver)) {
8589 +		if (unlikely(status & RxFIFOOver) &&
8590 +		(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
8591  			netif_stop_queue(dev);
8592  			rtl8169_tx_timeout(dev);
8593  			break;
8594 @@ -4891,6 +4889,9 @@ static int rtl8169_resume(struct device *device)
8595  {
8596  	struct pci_dev *pdev = to_pci_dev(device);
8597  	struct net_device *dev = pci_get_drvdata(pdev);
8598 +	struct rtl8169_private *tp = netdev_priv(dev);
8599 +
8600 +	rtl8169_init_phy(dev, tp);
8601  
8602  	if (netif_running(dev))
8603  		__rtl8169_resume(dev);
8604 @@ -4931,6 +4932,8 @@ static int rtl8169_runtime_resume(struct device *device)
8605  	tp->saved_wolopts = 0;
8606  	spin_unlock_irq(&tp->lock);
8607  
8608 +	rtl8169_init_phy(dev, tp);
8609 +
8610  	__rtl8169_resume(dev);
8611  
8612  	return 0;
8613 diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
8614 index 737df60..2ce585a 100644
8615 --- a/drivers/net/tehuti.c
8616 +++ b/drivers/net/tehuti.c
8617 @@ -324,7 +324,7 @@ static int bdx_fw_load(struct bdx_priv *priv)
8618  	ENTER;
8619  	master = READ_REG(priv, regINIT_SEMAPHORE);
8620  	if (!READ_REG(priv, regINIT_STATUS) && master) {
8621 -		rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
8622 +		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
8623  		if (rc)
8624  			goto out;
8625  		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
8626 @@ -2516,4 +2516,4 @@ module_exit(bdx_module_exit);
8627  MODULE_LICENSE("GPL");
8628  MODULE_AUTHOR(DRIVER_AUTHOR);
8629  MODULE_DESCRIPTION(BDX_DRV_DESC);
8630 -MODULE_FIRMWARE("tehuti/firmware.bin");
8631 +MODULE_FIRMWARE("tehuti/bdx.bin");
8632 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
8633 index ca7fc9d..c04d49e 100644
8634 --- a/drivers/net/usb/usbnet.c
8635 +++ b/drivers/net/usb/usbnet.c
8636 @@ -45,6 +45,7 @@
8637  #include <linux/usb/usbnet.h>
8638  #include <linux/slab.h>
8639  #include <linux/kernel.h>
8640 +#include <linux/pm_runtime.h>
8641  
8642  #define DRIVER_VERSION		"22-Aug-2005"
8643  
8644 @@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
8645  	struct usb_device		*xdev;
8646  	int				status;
8647  	const char			*name;
8648 +	struct usb_driver 	*driver = to_usb_driver(udev->dev.driver);
8649 +
8650 +	/* usbnet already took usb runtime pm, so have to enable the feature
8651 +	 * for usb interface, otherwise usb_autopm_get_interface may return
8652 +	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
8653 +	 */
8654 +	if (!driver->supports_autosuspend) {
8655 +		driver->supports_autosuspend = 1;
8656 +		pm_runtime_enable(&udev->dev);
8657 +	}
8658  
8659  	name = udev->dev.driver->name;
8660  	info = (struct driver_info *) prod->driver_info;
8661 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
8662 index 4598e9d..65d3d80 100644
8663 --- a/drivers/net/virtio_net.c
8664 +++ b/drivers/net/virtio_net.c
8665 @@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
8666  	}
8667  }
8668  
8669 +static void virtnet_napi_enable(struct virtnet_info *vi)
8670 +{
8671 +	napi_enable(&vi->napi);
8672 +
8673 +	/* If all buffers were filled by other side before we napi_enabled, we
8674 +	 * won't get another interrupt, so process any outstanding packets
8675 +	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
8676 +	 * We synchronize against interrupts via NAPI_STATE_SCHED */
8677 +	if (napi_schedule_prep(&vi->napi)) {
8678 +		virtqueue_disable_cb(vi->rvq);
8679 +		__napi_schedule(&vi->napi);
8680 +	}
8681 +}
8682 +
8683  static void refill_work(struct work_struct *work)
8684  {
8685  	struct virtnet_info *vi;
8686 @@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
8687  	vi = container_of(work, struct virtnet_info, refill.work);
8688  	napi_disable(&vi->napi);
8689  	still_empty = !try_fill_recv(vi, GFP_KERNEL);
8690 -	napi_enable(&vi->napi);
8691 +	virtnet_napi_enable(vi);
8692  
8693  	/* In theory, this can happen: if we don't get any buffers in
8694  	 * we will *never* try to fill again. */
8695 @@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
8696  {
8697  	struct virtnet_info *vi = netdev_priv(dev);
8698  
8699 -	napi_enable(&vi->napi);
8700 -
8701 -	/* If all buffers were filled by other side before we napi_enabled, we
8702 -	 * won't get another interrupt, so process any outstanding packets
8703 -	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
8704 -	 * We synchronize against interrupts via NAPI_STATE_SCHED */
8705 -	if (napi_schedule_prep(&vi->napi)) {
8706 -		virtqueue_disable_cb(vi->rvq);
8707 -		__napi_schedule(&vi->napi);
8708 -	}
8709 +	virtnet_napi_enable(vi);
8710  	return 0;
8711  }
8712  
8713 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
8714 index d32f282..a706202 100644
8715 --- a/drivers/net/wireless/ath/ath.h
8716 +++ b/drivers/net/wireless/ath/ath.h
8717 @@ -119,6 +119,7 @@ struct ath_common {
8718  
8719  	u32 keymax;
8720  	DECLARE_BITMAP(keymap, ATH_KEYMAX);
8721 +	DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
8722  	u8 splitmic;
8723  
8724  	struct ath_regulatory regulatory;
8725 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
8726 index fe7418a..d4ee070 100644
8727 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
8728 +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
8729 @@ -710,10 +710,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
8730  
8731  	/* Do NF cal only at longer intervals */
8732  	if (longcal || nfcal_pending) {
8733 -		/* Do periodic PAOffset Cal */
8734 -		ar9002_hw_pa_cal(ah, false);
8735 -		ar9002_hw_olc_temp_compensation(ah);
8736 -
8737  		/*
8738  		 * Get the value from the previous NF cal and update
8739  		 * history buffer.
8740 @@ -728,8 +724,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
8741  			ath9k_hw_loadnf(ah, ah->curchan);
8742  		}
8743  
8744 -		if (longcal)
8745 +		if (longcal) {
8746  			ath9k_hw_start_nfcal(ah, false);
8747 +			/* Do periodic PAOffset Cal */
8748 +			ar9002_hw_pa_cal(ah, false);
8749 +			ar9002_hw_olc_temp_compensation(ah);
8750 +		}
8751  	}
8752  
8753  	return iscaldone;
8754 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
8755 index 303c63d..ab1c50b 100644
8756 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
8757 +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
8758 @@ -411,6 +411,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
8759  			val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
8760  		}
8761  
8762 +		if (AR_SREV_9280(ah))
8763 +			val |= AR_WA_BIT22;
8764 +
8765  		if (AR_SREV_9285E_20(ah))
8766  			val |= AR_WA_BIT23;
8767  
8768 @@ -442,9 +445,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
8769  		}
8770  
8771  		/* WAR for ASPM system hang */
8772 -		if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
8773 +		if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
8774  			val |= (AR_WA_BIT6 | AR_WA_BIT7);
8775 -		}
8776  
8777  		if (AR_SREV_9285E_20(ah))
8778  			val |= AR_WA_BIT23;
8779 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
8780 index ec98ab5..a14a5e4 100644
8781 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
8782 +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
8783 @@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
8784  
8785  static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
8786  	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
8787 +	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
8788 +	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
8789 +	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
8790 +	{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8791  	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
8792  	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8793  	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
8794 @@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
8795  	{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
8796  	{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
8797  	{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
8798 +	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8799 +	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8800 +	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8801 +	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8802 +	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8803 +	{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
8804 +	{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
8805 +	{0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
8806 +	{0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
8807 +	{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
8808 +	{0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
8809 +	{0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
8810 +	{0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
8811 +	{0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
8812 +	{0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
8813 +	{0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
8814 +	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
8815 +	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
8816 +	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
8817 +	{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8818 +	{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
8819 +	{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
8820 +	{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
8821 +	{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8822  	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
8823  	{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
8824  	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
8825 @@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
8826  	{0x00008014, 0x044c044c, 0x08980898},
8827  	{0x0000801c, 0x148ec02b, 0x148ec057},
8828  	{0x00008318, 0x000044c0, 0x00008980},
8829 -	{0x00009e00, 0x03721821, 0x03721821},
8830 +	{0x00009e00, 0x0372131c, 0x0372131c},
8831  	{0x0000a230, 0x0000000b, 0x00000016},
8832  	{0x0000a254, 0x00000898, 0x00001130},
8833  };
8834 @@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
8835  	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
8836  	{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
8837  	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
8838 -	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
8839 -	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
8840 +	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
8841 +	{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
8842  	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
8843  	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
8844 -	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
8845 +	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
8846  	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8847  	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
8848  	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
8849  	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
8850 +	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
8851  	{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
8852  	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
8853  	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
8854 @@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
8855  	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
8856  	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
8857  	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
8858 -	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
8859 +	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
8860  	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8861  	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
8862 -	{0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
8863 +	{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
8864  	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8865  	{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
8866  	{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
8867  	{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
8868  	{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
8869 -	{0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
8870 +	{0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
8871  	{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8872  	{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
8873  	{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
8874 @@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8875  	{0x00009814, 0x9280c00a},
8876  	{0x00009818, 0x00000000},
8877  	{0x0000981c, 0x00020028},
8878 -	{0x00009834, 0x5f3ca3de},
8879 +	{0x00009834, 0x6400a290},
8880  	{0x00009838, 0x0108ecff},
8881  	{0x0000983c, 0x14750600},
8882  	{0x00009880, 0x201fff00},
8883  	{0x00009884, 0x00001042},
8884  	{0x000098a4, 0x00200400},
8885 -	{0x000098b0, 0x52440bbe},
8886 +	{0x000098b0, 0x32840bbe},
8887  	{0x000098d0, 0x004b6a8e},
8888  	{0x000098d4, 0x00000820},
8889  	{0x000098dc, 0x00000000},
8890 @@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8891  	{0x00009e30, 0x06336f77},
8892  	{0x00009e34, 0x6af6532f},
8893  	{0x00009e38, 0x0cc80c00},
8894 -	{0x00009e3c, 0xcf946222},
8895  	{0x00009e40, 0x0d261820},
8896  	{0x00009e4c, 0x00001004},
8897  	{0x00009e50, 0x00ff03f1},
8898 @@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8899  	{0x0000a220, 0x00000000},
8900  	{0x0000a224, 0x00000000},
8901  	{0x0000a228, 0x10002310},
8902 -	{0x0000a22c, 0x01036a1e},
8903 +	{0x0000a22c, 0x01036a27},
8904  	{0x0000a23c, 0x00000000},
8905  	{0x0000a244, 0x0c000000},
8906  	{0x0000a2a0, 0x00000001},
8907 @@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8908  	{0x0000a2c8, 0x00000000},
8909  	{0x0000a2cc, 0x18c43433},
8910  	{0x0000a2d4, 0x00000000},
8911 -	{0x0000a2dc, 0x00000000},
8912 -	{0x0000a2e0, 0x00000000},
8913 -	{0x0000a2e4, 0x00000000},
8914 -	{0x0000a2e8, 0x00000000},
8915  	{0x0000a2ec, 0x00000000},
8916  	{0x0000a2f0, 0x00000000},
8917  	{0x0000a2f4, 0x00000000},
8918 @@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8919  	{0x0000a430, 0x1ce739ce},
8920  	{0x0000a434, 0x00000000},
8921  	{0x0000a438, 0x00001801},
8922 -	{0x0000a43c, 0x00000000},
8923 +	{0x0000a43c, 0x00100000},
8924  	{0x0000a440, 0x00000000},
8925  	{0x0000a444, 0x00000000},
8926  	{0x0000a448, 0x06000080},
8927  	{0x0000a44c, 0x00000001},
8928  	{0x0000a450, 0x00010000},
8929  	{0x0000a458, 0x00000000},
8930 -	{0x0000a600, 0x00000000},
8931 -	{0x0000a604, 0x00000000},
8932 -	{0x0000a608, 0x00000000},
8933 -	{0x0000a60c, 0x00000000},
8934 -	{0x0000a610, 0x00000000},
8935 -	{0x0000a614, 0x00000000},
8936 -	{0x0000a618, 0x00000000},
8937 -	{0x0000a61c, 0x00000000},
8938 -	{0x0000a620, 0x00000000},
8939 -	{0x0000a624, 0x00000000},
8940 -	{0x0000a628, 0x00000000},
8941 -	{0x0000a62c, 0x00000000},
8942 -	{0x0000a630, 0x00000000},
8943 -	{0x0000a634, 0x00000000},
8944 -	{0x0000a638, 0x00000000},
8945 -	{0x0000a63c, 0x00000000},
8946  	{0x0000a640, 0x00000000},
8947  	{0x0000a644, 0x3fad9d74},
8948  	{0x0000a648, 0x0048060a},
8949 -	{0x0000a64c, 0x00000637},
8950 +	{0x0000a64c, 0x00003c37},
8951  	{0x0000a670, 0x03020100},
8952  	{0x0000a674, 0x09080504},
8953  	{0x0000a678, 0x0d0c0b0a},
8954 @@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8955  	{0x0000a8f4, 0x00000000},
8956  	{0x0000b2d0, 0x00000080},
8957  	{0x0000b2d4, 0x00000000},
8958 -	{0x0000b2dc, 0x00000000},
8959 -	{0x0000b2e0, 0x00000000},
8960 -	{0x0000b2e4, 0x00000000},
8961 -	{0x0000b2e8, 0x00000000},
8962  	{0x0000b2ec, 0x00000000},
8963  	{0x0000b2f0, 0x00000000},
8964  	{0x0000b2f4, 0x00000000},
8965 @@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8966  	{0x0000b8f4, 0x00000000},
8967  	{0x0000c2d0, 0x00000080},
8968  	{0x0000c2d4, 0x00000000},
8969 -	{0x0000c2dc, 0x00000000},
8970 -	{0x0000c2e0, 0x00000000},
8971 -	{0x0000c2e4, 0x00000000},
8972 -	{0x0000c2e8, 0x00000000},
8973  	{0x0000c2ec, 0x00000000},
8974  	{0x0000c2f0, 0x00000000},
8975  	{0x0000c2f4, 0x00000000},
8976 @@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
8977  
8978  static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
8979  	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
8980 +	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
8981 +	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
8982 +	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
8983 +	{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
8984  	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
8985  	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
8986  	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
8987 @@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
8988  	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
8989  	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
8990  	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
8991 -	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
8992 +	{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
8993  	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
8994  	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
8995  	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
8996 @@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
8997  	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
8998  	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
8999  	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
9000 +	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9001 +	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9002 +	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9003 +	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9004 +	{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
9005 +	{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
9006 +	{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
9007 +	{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
9008 +	{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
9009 +	{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
9010 +	{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
9011 +	{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9012 +	{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9013 +	{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9014 +	{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9015 +	{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9016 +	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
9017 +	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
9018 +	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
9019 +	{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9020 +	{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
9021 +	{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
9022 +	{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
9023 +	{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9024  	{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
9025  	{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
9026  	{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
9027 @@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
9028  
9029  static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
9030  	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
9031 +	{0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
9032 +	{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
9033 +	{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
9034 +	{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9035  	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
9036  	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
9037  	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
9038 @@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
9039  	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
9040  	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
9041  	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
9042 -	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
9043 +	{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
9044  	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
9045  	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
9046  	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
9047 @@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
9048  	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
9049  	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
9050  	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
9051 +	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9052 +	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9053 +	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9054 +	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9055 +	{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
9056 +	{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
9057 +	{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
9058 +	{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
9059 +	{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
9060 +	{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
9061 +	{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
9062 +	{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9063 +	{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9064 +	{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9065 +	{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9066 +	{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
9067 +	{0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
9068 +	{0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
9069 +	{0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
9070 +	{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9071 +	{0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
9072 +	{0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
9073 +	{0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
9074 +	{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9075  	{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
9076  	{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
9077  	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
9078 @@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
9079  	{0x0000b074, 0x00000000},
9080  	{0x0000b078, 0x00000000},
9081  	{0x0000b07c, 0x00000000},
9082 -	{0x0000b080, 0x32323232},
9083 -	{0x0000b084, 0x2f2f3232},
9084 -	{0x0000b088, 0x23282a2d},
9085 -	{0x0000b08c, 0x1c1e2123},
9086 -	{0x0000b090, 0x14171919},
9087 -	{0x0000b094, 0x0e0e1214},
9088 -	{0x0000b098, 0x03050707},
9089 -	{0x0000b09c, 0x00030303},
9090 +	{0x0000b080, 0x2a2d2f32},
9091 +	{0x0000b084, 0x21232328},
9092 +	{0x0000b088, 0x19191c1e},
9093 +	{0x0000b08c, 0x12141417},
9094 +	{0x0000b090, 0x07070e0e},
9095 +	{0x0000b094, 0x03030305},
9096 +	{0x0000b098, 0x00000003},
9097 +	{0x0000b09c, 0x00000000},
9098  	{0x0000b0a0, 0x00000000},
9099  	{0x0000b0a4, 0x00000000},
9100  	{0x0000b0a8, 0x00000000},
9101 @@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
9102  
9103  static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
9104  	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
9105 +	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
9106 +	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
9107 +	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
9108 +	{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9109  	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
9110  	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9111  	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
9112 @@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
9113  	{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
9114  	{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
9115  	{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
9116 +	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9117 +	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9118 +	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9119 +	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9120 +	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9121 +	{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
9122 +	{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
9123 +	{0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
9124 +	{0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
9125 +	{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
9126 +	{0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
9127 +	{0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
9128 +	{0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
9129 +	{0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
9130 +	{0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
9131 +	{0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
9132 +	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
9133 +	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
9134 +	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
9135 +	{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9136 +	{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
9137 +	{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
9138 +	{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
9139 +	{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
9140  	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
9141  	{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
9142  	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
9143 @@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
9144  	{0x00008144, 0xffffffff},
9145  	{0x00008168, 0x00000000},
9146  	{0x0000816c, 0x00000000},
9147 -	{0x00008170, 0x18486200},
9148 -	{0x00008174, 0x33332210},
9149 -	{0x00008178, 0x00000000},
9150 -	{0x0000817c, 0x00020000},
9151  	{0x000081c0, 0x00000000},
9152  	{0x000081c4, 0x33332210},
9153  	{0x000081c8, 0x00000000},
9154  	{0x000081cc, 0x00000000},
9155 -	{0x000081d4, 0x00000000},
9156  	{0x000081ec, 0x00000000},
9157  	{0x000081f0, 0x00000000},
9158  	{0x000081f4, 0x00000000},
9159 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
9160 index 057fb69..5f03c53 100644
9161 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
9162 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
9163 @@ -22,12 +22,14 @@
9164  #define COMP_CKSUM_LEN 2
9165  
9166  #define AR_CH0_TOP (0x00016288)
9167 -#define AR_CH0_TOP_XPABIASLVL (0x3)
9168 +#define AR_CH0_TOP_XPABIASLVL (0x300)
9169  #define AR_CH0_TOP_XPABIASLVL_S (8)
9170  
9171  #define AR_CH0_THERM (0x00016290)
9172 -#define AR_CH0_THERM_SPARE (0x3f)
9173 -#define AR_CH0_THERM_SPARE_S (0)
9174 +#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
9175 +#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
9176 +#define AR_CH0_THERM_XPASHORT2GND 0x4
9177 +#define AR_CH0_THERM_XPASHORT2GND_S 2
9178  
9179  #define AR_SWITCH_TABLE_COM_ALL (0xffff)
9180  #define AR_SWITCH_TABLE_COM_ALL_S (0)
9181 @@ -55,6 +57,8 @@
9182  #define SUB_NUM_CTL_MODES_AT_5G_40 2    /* excluding HT40, EXT-OFDM */
9183  #define SUB_NUM_CTL_MODES_AT_2G_40 3    /* excluding HT40, EXT-OFDM, EXT-CCK */
9184  
9185 +#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
9186 +
9187  static const struct ar9300_eeprom ar9300_default = {
9188  	.eepromVersion = 2,
9189  	.templateVersion = 2,
9190 @@ -290,20 +294,21 @@ static const struct ar9300_eeprom ar9300_default = {
9191  		}
9192  	 },
9193  	.ctlPowerData_2G = {
9194 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9195 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9196 -		 { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
9197 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9198 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9199 +		 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
9200  
9201 -		 { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
9202 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9203 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9204 +		 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
9205 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9206 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9207  
9208 -		 { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
9209 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9210 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9211 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
9212 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9213 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9214  
9215 -		 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
9216 -		 { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
9217 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
9218 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
9219 +		 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
9220  	 },
9221  	.modalHeader5G = {
9222  		/* 4 idle,t1,t2,b (4 bits per setting) */
9223 @@ -568,56 +573,56 @@ static const struct ar9300_eeprom ar9300_default = {
9224  	.ctlPowerData_5G = {
9225  		{
9226  			{
9227 -				{60, 1}, {60, 1}, {60, 1}, {60, 1},
9228 -				{60, 1}, {60, 1}, {60, 1}, {60, 0},
9229 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
9230 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
9231  			}
9232  		},
9233  		{
9234  			{
9235 -				{60, 1}, {60, 1}, {60, 1}, {60, 1},
9236 -				{60, 1}, {60, 1}, {60, 1}, {60, 0},
9237 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
9238 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
9239  			}
9240  		},
9241  		{
9242  			{
9243 -				{60, 0}, {60, 1}, {60, 0}, {60, 1},
9244 -				{60, 1}, {60, 1}, {60, 1}, {60, 1},
9245 +				CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
9246 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
9247  			}
9248  		},
9249  		{
9250  			{
9251 -				{60, 0}, {60, 1}, {60, 1}, {60, 0},
9252 -				{60, 1}, {60, 0}, {60, 0}, {60, 0},
9253 +				CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
9254 +				CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
9255  			}
9256  		},
9257  		{
9258  			{
9259 -				{60, 1}, {60, 1}, {60, 1}, {60, 0},
9260 -				{60, 0}, {60, 0}, {60, 0}, {60, 0},
9261 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
9262 +				CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
9263  			}
9264  		},
9265  		{
9266  			{
9267 -				{60, 1}, {60, 1}, {60, 1}, {60, 1},
9268 -				{60, 1}, {60, 0}, {60, 0}, {60, 0},
9269 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
9270 +				CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
9271  			}
9272  		},
9273  		{
9274  			{
9275 -				{60, 1}, {60, 1}, {60, 1}, {60, 1},
9276 -				{60, 1}, {60, 1}, {60, 1}, {60, 1},
9277 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
9278 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
9279  			}
9280  		},
9281  		{
9282  			{
9283 -				{60, 1}, {60, 1}, {60, 0}, {60, 1},
9284 -				{60, 1}, {60, 1}, {60, 1}, {60, 0},
9285 +				CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
9286 +				CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
9287  			}
9288  		},
9289  		{
9290  			{
9291 -				{60, 1}, {60, 0}, {60, 1}, {60, 1},
9292 -				{60, 1}, {60, 1}, {60, 0}, {60, 1},
9293 +				CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
9294 +				CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
9295  			}
9296  		},
9297  	 }
9298 @@ -992,9 +997,9 @@ static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
9299  static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
9300  {
9301  	int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
9302 -	REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
9303 -	REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
9304 -		      ((bias >> 2) & 0x3));
9305 +	REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
9306 +	REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB, bias >> 2);
9307 +	REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
9308  }
9309  
9310  static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
9311 @@ -1827,9 +1832,9 @@ static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
9312  	struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
9313  
9314  	if (is2GHz)
9315 -		return ctl_2g[idx].ctlEdges[edge].tPower;
9316 +		return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
9317  	else
9318 -		return ctl_5g[idx].ctlEdges[edge].tPower;
9319 +		return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
9320  }
9321  
9322  static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
9323 @@ -1847,12 +1852,12 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
9324  
9325  	if (is2GHz) {
9326  		if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
9327 -		    ctl_2g[idx].ctlEdges[edge - 1].flag)
9328 -			return ctl_2g[idx].ctlEdges[edge - 1].tPower;
9329 +		    CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
9330 +			return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
9331  	} else {
9332  		if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
9333 -		    ctl_5g[idx].ctlEdges[edge - 1].flag)
9334 -			return ctl_5g[idx].ctlEdges[edge - 1].tPower;
9335 +		    CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
9336 +			return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
9337  	}
9338  
9339  	return AR9300_MAX_RATE_POWER;
9340 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
9341 index 3c533bb..655b303 100644
9342 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
9343 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
9344 @@ -261,17 +261,12 @@ struct cal_tgt_pow_ht {
9345  	u8 tPow2x[14];
9346  } __packed;
9347  
9348 -struct cal_ctl_edge_pwr {
9349 -	u8 tPower:6,
9350 -	   flag:2;
9351 -} __packed;
9352 -
9353  struct cal_ctl_data_2g {
9354 -	struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
9355 +	u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
9356  } __packed;
9357  
9358  struct cal_ctl_data_5g {
9359 -	struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
9360 +	u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
9361  } __packed;
9362  
9363  struct ar9300_eeprom {
9364 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
9365 index 5b995be..1a0ab70 100644
9366 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
9367 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
9368 @@ -616,7 +616,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
9369  			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
9370  		} else if (rxsp->status11 & AR_MichaelErr) {
9371  			rxs->rs_status |= ATH9K_RXERR_MIC;
9372 -		}
9373 +		} else if (rxsp->status11 & AR_KeyMiss)
9374 +			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
9375  	}
9376  
9377  	return 0;
9378 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
9379 index 7c38229..716db41 100644
9380 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
9381 +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
9382 @@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
9383  	    (((Y[6] - Y[3]) * 1 << scale_factor) +
9384  	     (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
9385  
9386 +	/* prevent division by zero */
9387 +	if (G_fxp == 0)
9388 +		return false;
9389 +
9390  	Y_intercept =
9391  	    (G_fxp * (x_est[0] - x_est[3]) +
9392  	     (1 << scale_factor)) / (1 << scale_factor) + Y[3];
9393 @@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
9394  
9395  	for (i = 0; i <= 3; i++) {
9396  		y_est[i] = i * 32;
9397 -
9398 -		/* prevent division by zero */
9399 -		if (G_fxp == 0)
9400 -			return false;
9401 -
9402  		x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
9403  	}
9404  
9405 +	if (y_est[max_index] == 0)
9406 +		return false;
9407 +
9408  	x_est_fxp1_nonlin =
9409  	    x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
9410  				G_fxp) / G_fxp;
9411 @@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
9412  
9413  	Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
9414  	scale_B = scale_B / (1 << Q_scale_B);
9415 +	if (scale_B == 0)
9416 +		return false;
9417  	Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
9418  	Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
9419  	beta_raw = beta_raw / (1 << Q_beta);
9420 diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
9421 index 07f26ee..852745c 100644
9422 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
9423 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
9424 @@ -177,8 +177,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
9425  
9426  /* returns delimiter padding required given the packet length */
9427  #define ATH_AGGR_GET_NDELIM(_len)					\
9428 -	(((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ?           \
9429 -	  (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
9430 +       (((_len) >= ATH_AGGR_MINPLEN) ? 0 :                             \
9431 +        DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
9432  
9433  #define BAW_WITHIN(_start, _bawsz, _seqno) \
9434  	((((_seqno) - (_start)) & 4095) < (_bawsz))
9435 @@ -312,7 +312,6 @@ struct ath_rx {
9436  	u8 rxotherant;
9437  	u32 *rxlink;
9438  	unsigned int rxfilter;
9439 -	spinlock_t rxflushlock;
9440  	spinlock_t rxbuflock;
9441  	struct list_head rxbuf;
9442  	struct ath_descdma rxdma;
9443 @@ -346,8 +345,8 @@ void ath_tx_tasklet(struct ath_softc *sc);
9444  void ath_tx_edma_tasklet(struct ath_softc *sc);
9445  void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
9446  bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
9447 -void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
9448 -		       u16 tid, u16 *ssn);
9449 +int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
9450 +		      u16 tid, u16 *ssn);
9451  void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
9452  void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
9453  void ath9k_enable_ps(struct ath_softc *sc);
9454 @@ -516,7 +515,6 @@ void ath_deinit_leds(struct ath_softc *sc);
9455  #define SC_OP_RXFLUSH                BIT(7)
9456  #define SC_OP_LED_ASSOCIATED         BIT(8)
9457  #define SC_OP_LED_ON                 BIT(9)
9458 -#define SC_OP_SCANNING               BIT(10)
9459  #define SC_OP_TSF_RESET              BIT(11)
9460  #define SC_OP_BT_PRIORITY_DETECTED   BIT(12)
9461  #define SC_OP_BT_SCAN		     BIT(13)
9462 @@ -558,9 +556,9 @@ struct ath_softc {
9463  	struct ath_hw *sc_ah;
9464  	void __iomem *mem;
9465  	int irq;
9466 -	spinlock_t sc_resetlock;
9467  	spinlock_t sc_serial_rw;
9468  	spinlock_t sc_pm_lock;
9469 +	spinlock_t sc_pcu_lock;
9470  	struct mutex mutex;
9471  	struct work_struct paprd_work;
9472  	struct work_struct hw_check_work;
9473 diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
9474 index 4d4b22d..20cf2d4 100644
9475 --- a/drivers/net/wireless/ath/ath9k/beacon.c
9476 +++ b/drivers/net/wireless/ath/ath9k/beacon.c
9477 @@ -366,7 +366,7 @@ void ath_beacon_tasklet(unsigned long data)
9478  			ath_print(common, ATH_DBG_BEACON,
9479  				  "beacon is officially stuck\n");
9480  			sc->sc_flags |= SC_OP_TSF_RESET;
9481 -			ath_reset(sc, false);
9482 +			ath_reset(sc, true);
9483  		}
9484  
9485  		return;
9486 diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
9487 index c86f7d3..108b433 100644
9488 --- a/drivers/net/wireless/ath/ath9k/common.c
9489 +++ b/drivers/net/wireless/ath/ath9k/common.c
9490 @@ -366,9 +366,13 @@ int ath9k_cmn_key_config(struct ath_common *common,
9491  	set_bit(idx, common->keymap);
9492  	if (key->alg == ALG_TKIP) {
9493  		set_bit(idx + 64, common->keymap);
9494 +		set_bit(idx, common->tkip_keymap);
9495 +		set_bit(idx + 64, common->tkip_keymap);
9496  		if (common->splitmic) {
9497  			set_bit(idx + 32, common->keymap);
9498  			set_bit(idx + 64 + 32, common->keymap);
9499 +			set_bit(idx + 32, common->tkip_keymap);
9500 +			set_bit(idx + 64 + 32, common->tkip_keymap);
9501  		}
9502  	}
9503  
9504 @@ -393,10 +397,17 @@ void ath9k_cmn_key_delete(struct ath_common *common,
9505  		return;
9506  
9507  	clear_bit(key->hw_key_idx + 64, common->keymap);
9508 +
9509 +	clear_bit(key->hw_key_idx, common->tkip_keymap);
9510 +	clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
9511 +
9512  	if (common->splitmic) {
9513  		ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
9514  		clear_bit(key->hw_key_idx + 32, common->keymap);
9515  		clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
9516 +
9517 +		clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
9518 +		clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
9519  	}
9520  }
9521  EXPORT_SYMBOL(ath9k_cmn_key_delete);
9522 diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
9523 index 1266333..2bbf94d 100644
9524 --- a/drivers/net/wireless/ath/ath9k/eeprom.c
9525 +++ b/drivers/net/wireless/ath/ath9k/eeprom.c
9526 @@ -240,16 +240,16 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
9527  	for (i = 0; (i < num_band_edges) &&
9528  		     (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
9529  		if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
9530 -			twiceMaxEdgePower = pRdEdgesPower[i].tPower;
9531 +			twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
9532  			break;
9533  		} else if ((i > 0) &&
9534  			   (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
9535  						      is2GHz))) {
9536  			if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
9537  					       is2GHz) < freq &&
9538 -			    pRdEdgesPower[i - 1].flag) {
9539 +			    CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
9540  				twiceMaxEdgePower =
9541 -					pRdEdgesPower[i - 1].tPower;
9542 +					CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
9543  			}
9544  			break;
9545  		}
9546 diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
9547 index 0b09db0..17068f9 100644
9548 --- a/drivers/net/wireless/ath/ath9k/eeprom.h
9549 +++ b/drivers/net/wireless/ath/ath9k/eeprom.h
9550 @@ -233,6 +233,9 @@
9551  
9552  #define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
9553  
9554 +#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
9555 +#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
9556 +
9557  enum eeprom_param {
9558  	EEP_NFTHRESH_5,
9559  	EEP_NFTHRESH_2,
9560 @@ -533,18 +536,10 @@ struct cal_target_power_ht {
9561  	u8 tPow2x[8];
9562  } __packed;
9563  
9564 -
9565 -#ifdef __BIG_ENDIAN_BITFIELD
9566 -struct cal_ctl_edges {
9567 -	u8 bChannel;
9568 -	u8 flag:2, tPower:6;
9569 -} __packed;
9570 -#else
9571  struct cal_ctl_edges {
9572  	u8 bChannel;
9573 -	u8 tPower:6, flag:2;
9574 +	u8 ctl;
9575  } __packed;
9576 -#endif
9577  
9578  struct cal_data_op_loop_ar9287 {
9579  	u8 pwrPdg[2][5];
9580 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
9581 index dff2da7..7cb356e 100644
9582 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
9583 +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
9584 @@ -37,7 +37,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
9585  	int addr, eep_start_loc;
9586  	eep_data = (u16 *)eep;
9587  
9588 -	if (ah->hw_version.devid == 0x7015)
9589 +	if (AR9287_HTC_DEVID(ah))
9590  		eep_start_loc = AR9287_HTC_EEP_START_LOC;
9591  	else
9592  		eep_start_loc = AR9287_EEP_START_LOC;
9593 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
9594 index afa2b73..a48eb57 100644
9595 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
9596 +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
9597 @@ -1062,15 +1062,19 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
9598  	case 1:
9599  		break;
9600  	case 2:
9601 -		scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
9602 +		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
9603 +			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
9604 +		else
9605 +			scaledPower = 0;
9606  		break;
9607  	case 3:
9608 -		scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
9609 +		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
9610 +			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
9611 +		else
9612 +			scaledPower = 0;
9613  		break;
9614  	}
9615  
9616 -	scaledPower = max((u16)0, scaledPower);
9617 -
9618  	if (IS_CHAN_2GHZ(chan)) {
9619  		numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
9620  			SUB_NUM_CTL_MODES_AT_2G_40;
9621 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
9622 index 17e7a9a..0e9bbb1 100644
9623 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
9624 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
9625 @@ -35,8 +35,14 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
9626  	{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
9627  	{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
9628  	{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
9629 +	{ USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
9630 +	{ USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
9631 +	{ USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
9632 +	{ USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
9633  	{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
9634  	{ USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
9635 +	{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
9636 +	{ USB_DEVICE(0x1668, 0x1200) }, /* Verizon */
9637  	{ },
9638  };
9639  
9640 @@ -138,16 +144,36 @@ static void hif_usb_tx_cb(struct urb *urb)
9641  	case -ENODEV:
9642  	case -ESHUTDOWN:
9643  		/*
9644 -		 * The URB has been killed, free the SKBs
9645 -		 * and return.
9646 +		 * The URB has been killed, free the SKBs.
9647  		 */
9648  		ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
9649 -		return;
9650 +
9651 +		/*
9652 +		 * If the URBs are being flushed, no need to add this
9653 +		 * URB to the free list.
9654 +		 */
9655 +		spin_lock(&hif_dev->tx.tx_lock);
9656 +		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
9657 +			spin_unlock(&hif_dev->tx.tx_lock);
9658 +			return;
9659 +		}
9660 +		spin_unlock(&hif_dev->tx.tx_lock);
9661 +
9662 +		/*
9663 +		 * In the stop() case, this URB has to be added to
9664 +		 * the free list.
9665 +		 */
9666 +		goto add_free;
9667  	default:
9668  		break;
9669  	}
9670  
9671 -	/* Check if TX has been stopped */
9672 +	/*
9673 +	 * Check if TX has been stopped, this is needed because
9674 +	 * this CB could have been invoked just after the TX lock
9675 +	 * was released in hif_stop() and kill_urb() hasn't been
9676 +	 * called yet.
9677 +	 */
9678  	spin_lock(&hif_dev->tx.tx_lock);
9679  	if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
9680  		spin_unlock(&hif_dev->tx.tx_lock);
9681 @@ -299,6 +325,7 @@ static void hif_usb_start(void *hif_handle, u8 pipe_id)
9682  static void hif_usb_stop(void *hif_handle, u8 pipe_id)
9683  {
9684  	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
9685 +	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
9686  	unsigned long flags;
9687  
9688  	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
9689 @@ -306,6 +333,12 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
9690  	hif_dev->tx.tx_skb_cnt = 0;
9691  	hif_dev->tx.flags |= HIF_USB_TX_STOP;
9692  	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
9693 +
9694 +	/* The pending URBs have to be canceled. */
9695 +	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
9696 +				 &hif_dev->tx.tx_pending, list) {
9697 +		usb_kill_urb(tx_buf->urb);
9698 +	}
9699  }
9700  
9701  static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
9702 @@ -571,6 +604,7 @@ free:
9703  static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
9704  {
9705  	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
9706 +	unsigned long flags;
9707  
9708  	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
9709  				 &hif_dev->tx.tx_buf, list) {
9710 @@ -581,6 +615,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
9711  		kfree(tx_buf);
9712  	}
9713  
9714 +	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
9715 +	hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
9716 +	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
9717 +
9718  	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
9719  				 &hif_dev->tx.tx_pending, list) {
9720  		usb_kill_urb(tx_buf->urb);
9721 @@ -799,10 +837,18 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
9722  	}
9723  	kfree(buf);
9724  
9725 -	if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
9726 +	switch (hif_dev->device_id) {
9727 +	case 0x7010:
9728 +	case 0x7015:
9729 +	case 0x9018:
9730 +	case 0xA704:
9731 +	case 0x1200:
9732  		firm_offset = AR7010_FIRMWARE_TEXT;
9733 -	else
9734 +		break;
9735 +	default:
9736  		firm_offset = AR9271_FIRMWARE_TEXT;
9737 +		break;
9738 +	}
9739  
9740  	/*
9741  	 * Issue FW download complete command to firmware.
9742 @@ -903,6 +949,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
9743  	case 0x7010:
9744  	case 0x7015:
9745  	case 0x9018:
9746 +	case 0xA704:
9747 +	case 0x1200:
9748  		if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
9749  			hif_dev->fw_name = FIRMWARE_AR7010_1_1;
9750  		else
9751 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
9752 index 2daf97b..30d0938 100644
9753 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h
9754 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
9755 @@ -62,6 +62,7 @@ struct tx_buf {
9756  };
9757  
9758  #define HIF_USB_TX_STOP  BIT(0)
9759 +#define HIF_USB_TX_FLUSH BIT(1)
9760  
9761  struct hif_usb_tx {
9762  	u8 flags;
9763 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
9764 index 2d42791..e5a0122 100644
9765 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
9766 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
9767 @@ -247,6 +247,8 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
9768  	case 0x7010:
9769  	case 0x7015:
9770  	case 0x9018:
9771 +	case 0xA704:
9772 +	case 0x1200:
9773  		priv->htc->credits = 45;
9774  		break;
9775  	default:
9776 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
9777 index 2a6e45a..f06eeab 100644
9778 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
9779 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
9780 @@ -121,7 +121,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
9781  			tx_hdr.data_type = ATH9K_HTC_NORMAL;
9782  		}
9783  
9784 -		if (ieee80211_is_data(fc)) {
9785 +		if (ieee80211_is_data_qos(fc)) {
9786  			qc = ieee80211_get_qos_ctl(hdr);
9787  			tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
9788  		}
9789 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
9790 index 3384ca1..5250c8d 100644
9791 --- a/drivers/net/wireless/ath/ath9k/hw.c
9792 +++ b/drivers/net/wireless/ath/ath9k/hw.c
9793 @@ -387,6 +387,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
9794  	else
9795  		ah->config.ht_enable = 0;
9796  
9797 +	/* PAPRD needs some more work to be enabled */
9798 +	ah->config.paprd_disable = 1;
9799 +
9800  	ah->config.rx_intr_mitigation = true;
9801  	ah->config.pcieSerDesWrite = true;
9802  
9803 @@ -486,6 +489,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
9804  		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
9805  			  "Failed allocating banks for "
9806  			  "external radio\n");
9807 +		ath9k_hw_rf_free_ext_banks(ah);
9808  		return ecode;
9809  	}
9810  
9811 @@ -2263,7 +2267,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
9812  		pCap->rx_status_len = sizeof(struct ar9003_rxs);
9813  		pCap->tx_desc_len = sizeof(struct ar9003_txc);
9814  		pCap->txs_len = sizeof(struct ar9003_txs);
9815 -		if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
9816 +		if (!ah->config.paprd_disable &&
9817 +		    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
9818  			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
9819  	} else {
9820  		pCap->tx_desc_len = sizeof(struct ath_desc);
9821 @@ -2350,7 +2355,8 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
9822  		val = REG_READ(ah, AR7010_GPIO_IN);
9823  		return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
9824  	} else if (AR_SREV_9300_20_OR_LATER(ah))
9825 -		return MS_REG_READ(AR9300, gpio) != 0;
9826 +		return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
9827 +			AR_GPIO_BIT(gpio)) != 0;
9828  	else if (AR_SREV_9271(ah))
9829  		return MS_REG_READ(AR9271, gpio) != 0;
9830  	else if (AR_SREV_9287_10_OR_LATER(ah))
9831 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
9832 index 399f7c1..e58ff11 100644
9833 --- a/drivers/net/wireless/ath/ath9k/hw.h
9834 +++ b/drivers/net/wireless/ath/ath9k/hw.h
9835 @@ -240,6 +240,7 @@ struct ath9k_ops_config {
9836  	u32 pcie_waen;
9837  	u8 analog_shiftreg;
9838  	u8 ht_enable;
9839 +	u8 paprd_disable;
9840  	u32 ofdm_trig_low;
9841  	u32 ofdm_trig_high;
9842  	u32 cck_trig_high;
9843 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
9844 index 243c177..2e4724f 100644
9845 --- a/drivers/net/wireless/ath/ath9k/init.c
9846 +++ b/drivers/net/wireless/ath/ath9k/init.c
9847 @@ -56,7 +56,7 @@ MODULE_PARM_DESC(blink, "Enable LED blink on activity");
9848   * on 5 MHz steps, we support the channels which we know
9849   * we have calibration data for all cards though to make
9850   * this static */
9851 -static struct ieee80211_channel ath9k_2ghz_chantable[] = {
9852 +static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
9853  	CHAN2G(2412, 0), /* Channel 1 */
9854  	CHAN2G(2417, 1), /* Channel 2 */
9855  	CHAN2G(2422, 2), /* Channel 3 */
9856 @@ -77,7 +77,7 @@ static struct ieee80211_channel ath9k_2ghz_chantable[] = {
9857   * on 5 MHz steps, we support the channels which we know
9858   * we have calibration data for all cards though to make
9859   * this static */
9860 -static struct ieee80211_channel ath9k_5ghz_chantable[] = {
9861 +static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
9862  	/* _We_ call this UNII 1 */
9863  	CHAN5G(5180, 14), /* Channel 36 */
9864  	CHAN5G(5200, 15), /* Channel 40 */
9865 @@ -477,10 +477,17 @@ err:
9866  	return -EIO;
9867  }
9868  
9869 -static void ath9k_init_channels_rates(struct ath_softc *sc)
9870 +static int ath9k_init_channels_rates(struct ath_softc *sc)
9871  {
9872 +	void *channels;
9873 +
9874  	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
9875 -		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
9876 +		channels = kmemdup(ath9k_2ghz_chantable,
9877 +			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
9878 +		if (!channels)
9879 +		    return -ENOMEM;
9880 +
9881 +		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
9882  		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
9883  		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
9884  			ARRAY_SIZE(ath9k_2ghz_chantable);
9885 @@ -490,7 +497,15 @@ static void ath9k_init_channels_rates(struct ath_softc *sc)
9886  	}
9887  
9888  	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
9889 -		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
9890 +		channels = kmemdup(ath9k_5ghz_chantable,
9891 +			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
9892 +		if (!channels) {
9893 +			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
9894 +				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
9895 +			return -ENOMEM;
9896 +		}
9897 +
9898 +		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
9899  		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
9900  		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
9901  			ARRAY_SIZE(ath9k_5ghz_chantable);
9902 @@ -499,6 +514,7 @@ static void ath9k_init_channels_rates(struct ath_softc *sc)
9903  		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
9904  			ARRAY_SIZE(ath9k_legacy_rates) - 4;
9905  	}
9906 +	return 0;
9907  }
9908  
9909  static void ath9k_init_misc(struct ath_softc *sc)
9910 @@ -558,7 +574,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
9911  	common->debug_mask = ath9k_debug;
9912  
9913  	spin_lock_init(&sc->wiphy_lock);
9914 -	spin_lock_init(&sc->sc_resetlock);
9915  	spin_lock_init(&sc->sc_serial_rw);
9916  	spin_lock_init(&sc->sc_pm_lock);
9917  	mutex_init(&sc->mutex);
9918 @@ -593,8 +608,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
9919  	if (ret)
9920  		goto err_btcoex;
9921  
9922 +	ret = ath9k_init_channels_rates(sc);
9923 +	if (ret)
9924 +		goto err_btcoex;
9925 +
9926  	ath9k_init_crypto(sc);
9927 -	ath9k_init_channels_rates(sc);
9928  	ath9k_init_misc(sc);
9929  
9930  	return 0;
9931 @@ -641,7 +659,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
9932  		BIT(NL80211_IFTYPE_ADHOC) |
9933  		BIT(NL80211_IFTYPE_MESH_POINT);
9934  
9935 -	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
9936 +	if (AR_SREV_5416(sc->sc_ah))
9937 +		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
9938  
9939  	hw->queues = 4;
9940  	hw->max_rates = 4;
9941 @@ -751,6 +770,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
9942  {
9943  	int i = 0;
9944  
9945 +	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
9946 +		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
9947 +
9948 +	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
9949 +		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
9950 +
9951          if ((sc->btcoex.no_stomp_timer) &&
9952  	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
9953  		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
9954 diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
9955 index e955bb9..79302b1 100644
9956 --- a/drivers/net/wireless/ath/ath9k/mac.c
9957 +++ b/drivers/net/wireless/ath/ath9k/mac.c
9958 @@ -713,6 +713,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
9959  			rs->rs_status |= ATH9K_RXERR_DECRYPT;
9960  		else if (ads.ds_rxstatus8 & AR_MichaelErr)
9961  			rs->rs_status |= ATH9K_RXERR_MIC;
9962 +		else if (ads.ds_rxstatus8 & AR_KeyMiss)
9963 +			rs->rs_status |= ATH9K_RXERR_DECRYPT;
9964  	}
9965  
9966  	return 0;
9967 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
9968 index 3caa323..9790d3a 100644
9969 --- a/drivers/net/wireless/ath/ath9k/main.c
9970 +++ b/drivers/net/wireless/ath/ath9k/main.c
9971 @@ -213,6 +213,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
9972  	 */
9973  	ath9k_hw_set_interrupts(ah, 0);
9974  	ath_drain_all_txq(sc, false);
9975 +
9976 +	spin_lock_bh(&sc->sc_pcu_lock);
9977 +
9978  	stopped = ath_stoprecv(sc);
9979  
9980  	/* XXX: do not flush receive queue here. We don't want
9981 @@ -230,34 +233,35 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
9982  		  sc->sc_ah->curchan->channel,
9983  		  channel->center_freq, conf_is_ht40(conf));
9984  
9985 -	spin_lock_bh(&sc->sc_resetlock);
9986 -
9987  	r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
9988  	if (r) {
9989  		ath_print(common, ATH_DBG_FATAL,
9990  			  "Unable to reset channel (%u MHz), "
9991  			  "reset status %d\n",
9992  			  channel->center_freq, r);
9993 -		spin_unlock_bh(&sc->sc_resetlock);
9994 +		spin_unlock_bh(&sc->sc_pcu_lock);
9995  		goto ps_restore;
9996  	}
9997 -	spin_unlock_bh(&sc->sc_resetlock);
9998  
9999  	if (ath_startrecv(sc) != 0) {
10000  		ath_print(common, ATH_DBG_FATAL,
10001  			  "Unable to restart recv logic\n");
10002  		r = -EIO;
10003 +		spin_unlock_bh(&sc->sc_pcu_lock);
10004  		goto ps_restore;
10005  	}
10006  
10007 +	spin_unlock_bh(&sc->sc_pcu_lock);
10008 +
10009  	ath_cache_conf_rate(sc, &hw->conf);
10010  	ath_update_txpow(sc);
10011  	ath9k_hw_set_interrupts(ah, ah->imask);
10012  
10013 -	if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) {
10014 -		ath_start_ani(common);
10015 +	if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
10016 +		if (sc->sc_flags & SC_OP_BEACONS)
10017 +			ath_beacon_config(sc, NULL);
10018  		ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
10019 -		ath_beacon_config(sc, NULL);
10020 +		ath_start_ani(common);
10021  	}
10022  
10023   ps_restore:
10024 @@ -269,6 +273,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
10025  {
10026  	struct ath_hw *ah = sc->sc_ah;
10027  	struct ath9k_hw_cal_data *caldata = ah->caldata;
10028 +	struct ath_common *common = ath9k_hw_common(ah);
10029  	int chain;
10030  
10031  	if (!caldata || !caldata->paprd_done)
10032 @@ -277,7 +282,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
10033  	ath9k_ps_wakeup(sc);
10034  	ar9003_paprd_enable(ah, false);
10035  	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
10036 -		if (!(ah->caps.tx_chainmask & BIT(chain)))
10037 +		if (!(common->tx_chainmask & BIT(chain)))
10038  			continue;
10039  
10040  		ar9003_paprd_populate_single_table(ah, caldata, chain);
10041 @@ -299,6 +304,7 @@ void ath_paprd_calibrate(struct work_struct *work)
10042  	struct ieee80211_supported_band *sband = &sc->sbands[band];
10043  	struct ath_tx_control txctl;
10044  	struct ath9k_hw_cal_data *caldata = ah->caldata;
10045 +	struct ath_common *common = ath9k_hw_common(ah);
10046  	int qnum, ftype;
10047  	int chain_ok = 0;
10048  	int chain;
10049 @@ -332,7 +338,7 @@ void ath_paprd_calibrate(struct work_struct *work)
10050  	ath9k_ps_wakeup(sc);
10051  	ar9003_paprd_init_table(ah);
10052  	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
10053 -		if (!(ah->caps.tx_chainmask & BIT(chain)))
10054 +		if (!(common->tx_chainmask & BIT(chain)))
10055  			continue;
10056  
10057  		chain_ok = 0;
10058 @@ -550,7 +556,7 @@ void ath_hw_check(struct work_struct *work)
10059  
10060  		msleep(1);
10061  	}
10062 -	ath_reset(sc, false);
10063 +	ath_reset(sc, true);
10064  
10065  out:
10066  	ath9k_ps_restore(sc);
10067 @@ -568,7 +574,7 @@ void ath9k_tasklet(unsigned long data)
10068  	ath9k_ps_wakeup(sc);
10069  
10070  	if (status & ATH9K_INT_FATAL) {
10071 -		ath_reset(sc, false);
10072 +		ath_reset(sc, true);
10073  		ath9k_ps_restore(sc);
10074  		return;
10075  	}
10076 @@ -583,7 +589,7 @@ void ath9k_tasklet(unsigned long data)
10077  		rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
10078  
10079  	if (status & rxmask) {
10080 -		spin_lock_bh(&sc->rx.rxflushlock);
10081 +		spin_lock_bh(&sc->sc_pcu_lock);
10082  
10083  		/* Check for high priority Rx first */
10084  		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
10085 @@ -591,7 +597,7 @@ void ath9k_tasklet(unsigned long data)
10086  			ath_rx_tasklet(sc, 0, true);
10087  
10088  		ath_rx_tasklet(sc, 0, false);
10089 -		spin_unlock_bh(&sc->rx.rxflushlock);
10090 +		spin_unlock_bh(&sc->sc_pcu_lock);
10091  	}
10092  
10093  	if (status & ATH9K_INT_TX) {
10094 @@ -838,7 +844,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
10095  	if (!ah->curchan)
10096  		ah->curchan = ath_get_curchannel(sc, sc->hw);
10097  
10098 -	spin_lock_bh(&sc->sc_resetlock);
10099 +	spin_lock_bh(&sc->sc_pcu_lock);
10100  	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
10101  	if (r) {
10102  		ath_print(common, ATH_DBG_FATAL,
10103 @@ -846,14 +852,15 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
10104  			  "reset status %d\n",
10105  			  channel->center_freq, r);
10106  	}
10107 -	spin_unlock_bh(&sc->sc_resetlock);
10108  
10109  	ath_update_txpow(sc);
10110  	if (ath_startrecv(sc) != 0) {
10111  		ath_print(common, ATH_DBG_FATAL,
10112  			  "Unable to restart recv logic\n");
10113 +		spin_unlock_bh(&sc->sc_pcu_lock);
10114  		return;
10115  	}
10116 +	spin_unlock_bh(&sc->sc_pcu_lock);
10117  
10118  	if (sc->sc_flags & SC_OP_BEACONS)
10119  		ath_beacon_config(sc, NULL);	/* restart beacons */
10120 @@ -892,13 +899,15 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
10121  	ath9k_hw_set_interrupts(ah, 0);
10122  
10123  	ath_drain_all_txq(sc, false);	/* clear pending tx frames */
10124 +
10125 +	spin_lock_bh(&sc->sc_pcu_lock);
10126 +
10127  	ath_stoprecv(sc);		/* turn off frame recv */
10128  	ath_flushrecv(sc);		/* flush recv queue */
10129  
10130  	if (!ah->curchan)
10131  		ah->curchan = ath_get_curchannel(sc, hw);
10132  
10133 -	spin_lock_bh(&sc->sc_resetlock);
10134  	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
10135  	if (r) {
10136  		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
10137 @@ -906,9 +915,11 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
10138  			  "reset status %d\n",
10139  			  channel->center_freq, r);
10140  	}
10141 -	spin_unlock_bh(&sc->sc_resetlock);
10142  
10143  	ath9k_hw_phy_disable(ah);
10144 +
10145 +	spin_unlock_bh(&sc->sc_pcu_lock);
10146 +
10147  	ath9k_hw_configpcipowersave(ah, 1, 1);
10148  	ath9k_ps_restore(sc);
10149  	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
10150 @@ -928,20 +939,23 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
10151  
10152  	ath9k_hw_set_interrupts(ah, 0);
10153  	ath_drain_all_txq(sc, retry_tx);
10154 +
10155 +	spin_lock_bh(&sc->sc_pcu_lock);
10156 +
10157  	ath_stoprecv(sc);
10158  	ath_flushrecv(sc);
10159  
10160 -	spin_lock_bh(&sc->sc_resetlock);
10161  	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
10162  	if (r)
10163  		ath_print(common, ATH_DBG_FATAL,
10164  			  "Unable to reset hardware; reset status %d\n", r);
10165 -	spin_unlock_bh(&sc->sc_resetlock);
10166  
10167  	if (ath_startrecv(sc) != 0)
10168  		ath_print(common, ATH_DBG_FATAL,
10169  			  "Unable to start recv logic\n");
10170  
10171 +	spin_unlock_bh(&sc->sc_pcu_lock);
10172 +
10173  	/*
10174  	 * We may be doing a reset in response to a request
10175  	 * that changes the channel so update any state that
10176 @@ -951,7 +965,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
10177  
10178  	ath_update_txpow(sc);
10179  
10180 -	if (sc->sc_flags & SC_OP_BEACONS)
10181 +	if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
10182  		ath_beacon_config(sc, NULL);	/* restart beacons */
10183  
10184  	ath9k_hw_set_interrupts(ah, ah->imask);
10185 @@ -1106,17 +1120,16 @@ static int ath9k_start(struct ieee80211_hw *hw)
10186  	 * be followed by initialization of the appropriate bits
10187  	 * and then setup of the interrupt mask.
10188  	 */
10189 -	spin_lock_bh(&sc->sc_resetlock);
10190 +	spin_lock_bh(&sc->sc_pcu_lock);
10191  	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
10192  	if (r) {
10193  		ath_print(common, ATH_DBG_FATAL,
10194  			  "Unable to reset hardware; reset status %d "
10195  			  "(freq %u MHz)\n", r,
10196  			  curchan->center_freq);
10197 -		spin_unlock_bh(&sc->sc_resetlock);
10198 +		spin_unlock_bh(&sc->sc_pcu_lock);
10199  		goto mutex_unlock;
10200  	}
10201 -	spin_unlock_bh(&sc->sc_resetlock);
10202  
10203  	/*
10204  	 * This is needed only to setup initial state
10205 @@ -1135,8 +1148,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
10206  		ath_print(common, ATH_DBG_FATAL,
10207  			  "Unable to start recv logic\n");
10208  		r = -EIO;
10209 +		spin_unlock_bh(&sc->sc_pcu_lock);
10210  		goto mutex_unlock;
10211  	}
10212 +	spin_unlock_bh(&sc->sc_pcu_lock);
10213  
10214  	/* Setup our intr mask. */
10215  	ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
10216 @@ -1340,18 +1355,24 @@ static void ath9k_stop(struct ieee80211_hw *hw)
10217  
10218  	if (!(sc->sc_flags & SC_OP_INVALID)) {
10219  		ath_drain_all_txq(sc, false);
10220 +		spin_lock_bh(&sc->sc_pcu_lock);
10221  		ath_stoprecv(sc);
10222  		ath9k_hw_phy_disable(ah);
10223 -	} else
10224 +		spin_unlock_bh(&sc->sc_pcu_lock);
10225 +	} else {
10226 +		spin_lock_bh(&sc->sc_pcu_lock);
10227  		sc->rx.rxlink = NULL;
10228 +		spin_unlock_bh(&sc->sc_pcu_lock);
10229 +	}
10230  
10231  	/* disable HAL and put h/w to sleep */
10232  	ath9k_hw_disable(ah);
10233  	ath9k_hw_configpcipowersave(ah, 1, 1);
10234  	ath9k_ps_restore(sc);
10235  
10236 -	/* Finally, put the chip in FULL SLEEP mode */
10237 -	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
10238 +	sc->ps_idle = true;
10239 +	ath9k_set_wiphy_idle(aphy, true);
10240 +	ath_radio_disable(sc, hw);
10241  
10242  	sc->sc_flags |= SC_OP_INVALID;
10243  
10244 @@ -1455,6 +1476,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
10245  	struct ath_softc *sc = aphy->sc;
10246  	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
10247  	struct ath_vif *avp = (void *)vif->drv_priv;
10248 +	bool bs_valid = false;
10249  	int i;
10250  
10251  	ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
10252 @@ -1483,7 +1505,15 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
10253  			       "slot\n", __func__);
10254  			sc->beacon.bslot[i] = NULL;
10255  			sc->beacon.bslot_aphy[i] = NULL;
10256 -		}
10257 +		} else if (sc->beacon.bslot[i])
10258 +			bs_valid = true;
10259 +	}
10260 +	if (!bs_valid && (sc->sc_ah->imask & ATH9K_INT_SWBA)) {
10261 +		/* Disable SWBA interrupt */
10262 +		sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
10263 +		ath9k_ps_wakeup(sc);
10264 +		ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
10265 +		ath9k_ps_restore(sc);
10266  	}
10267  
10268  	sc->nvifs--;
10269 @@ -1556,6 +1586,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
10270  	 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
10271  	 */
10272  	if (changed & IEEE80211_CONF_CHANGE_PS) {
10273 +		unsigned long flags;
10274 +		spin_lock_irqsave(&sc->sc_pm_lock, flags);
10275  		if (conf->flags & IEEE80211_CONF_PS) {
10276  			sc->ps_flags |= PS_ENABLED;
10277  			/*
10278 @@ -1570,7 +1602,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
10279  			sc->ps_enabled = false;
10280  			sc->ps_flags &= ~(PS_ENABLED |
10281  					  PS_NULLFUNC_COMPLETED);
10282 -			ath9k_setpower(sc, ATH9K_PM_AWAKE);
10283 +			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
10284  			if (!(ah->caps.hw_caps &
10285  			      ATH9K_HW_CAP_AUTOSLEEP)) {
10286  				ath9k_hw_setrxabort(sc->sc_ah, 0);
10287 @@ -1585,6 +1617,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
10288  				}
10289  			}
10290  		}
10291 +		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
10292  	}
10293  
10294  	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
10295 @@ -1968,8 +2001,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
10296  		break;
10297  	case IEEE80211_AMPDU_TX_START:
10298  		ath9k_ps_wakeup(sc);
10299 -		ath_tx_aggr_start(sc, sta, tid, ssn);
10300 -		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
10301 +		ret = ath_tx_aggr_start(sc, sta, tid, ssn);
10302 +		if (!ret)
10303 +			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
10304  		ath9k_ps_restore(sc);
10305  		break;
10306  	case IEEE80211_AMPDU_TX_STOP:
10307 @@ -2032,7 +2066,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
10308  
10309  	aphy->state = ATH_WIPHY_SCAN;
10310  	ath9k_wiphy_pause_all_forced(sc, aphy);
10311 -	sc->sc_flags |= SC_OP_SCANNING;
10312  	mutex_unlock(&sc->mutex);
10313  }
10314  
10315 @@ -2047,7 +2080,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
10316  
10317  	mutex_lock(&sc->mutex);
10318  	aphy->state = ATH_WIPHY_ACTIVE;
10319 -	sc->sc_flags &= ~SC_OP_SCANNING;
10320  	mutex_unlock(&sc->mutex);
10321  }
10322  
10323 diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
10324 index b5b6514..894ebadb 100644
10325 --- a/drivers/net/wireless/ath/ath9k/pci.c
10326 +++ b/drivers/net/wireless/ath/ath9k/pci.c
10327 @@ -290,6 +290,10 @@ static int ath_pci_resume(struct pci_dev *pdev)
10328  			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
10329  	ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
10330  
10331 +	sc->ps_idle = true;
10332 +	ath9k_set_wiphy_idle(aphy, true);
10333 +	ath_radio_disable(sc, hw);
10334 +
10335  	return 0;
10336  }
10337  
10338 diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
10339 index e49be73..5f825ce 100644
10340 --- a/drivers/net/wireless/ath/ath9k/rc.c
10341 +++ b/drivers/net/wireless/ath/ath9k/rc.c
10342 @@ -538,7 +538,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
10343  	for (i = 0; i < rateset->rs_nrates; i++) {
10344  		for (j = 0; j < rate_table->rate_cnt; j++) {
10345  			u32 phy = rate_table->info[j].phy;
10346 -			u16 rate_flags = rate_table->info[i].rate_flags;
10347 +			u16 rate_flags = rate_table->info[j].rate_flags;
10348  			u8 rate = rateset->rs_rates[i];
10349  			u8 dot11rate = rate_table->info[j].dot11rate;
10350  
10351 @@ -1359,6 +1359,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
10352  	if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
10353  		return;
10354  
10355 +	if (!(tx_info->flags & IEEE80211_TX_STAT_AMPDU)) {
10356 +		tx_info->status.ampdu_ack_len =
10357 +			(tx_info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
10358 +		tx_info->status.ampdu_len = 1;
10359 +	}
10360 +
10361  	/*
10362  	 * If an underrun error is seen assume it as an excessive retry only
10363  	 * if max frame trigger level has been reached (2 KB for singel stream,
10364 diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
10365 index a3fc987..912f747 100644
10366 --- a/drivers/net/wireless/ath/ath9k/recv.c
10367 +++ b/drivers/net/wireless/ath/ath9k/recv.c
10368 @@ -288,19 +288,17 @@ static void ath_edma_start_recv(struct ath_softc *sc)
10369  	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
10370  			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
10371  
10372 -	spin_unlock_bh(&sc->rx.rxbuflock);
10373 -
10374  	ath_opmode_init(sc);
10375  
10376 -	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
10377 +	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
10378 +
10379 +	spin_unlock_bh(&sc->rx.rxbuflock);
10380  }
10381  
10382  static void ath_edma_stop_recv(struct ath_softc *sc)
10383  {
10384 -	spin_lock_bh(&sc->rx.rxbuflock);
10385  	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
10386  	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
10387 -	spin_unlock_bh(&sc->rx.rxbuflock);
10388  }
10389  
10390  int ath_rx_init(struct ath_softc *sc, int nbufs)
10391 @@ -310,7 +308,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
10392  	struct ath_buf *bf;
10393  	int error = 0;
10394  
10395 -	spin_lock_init(&sc->rx.rxflushlock);
10396 +	spin_lock_init(&sc->sc_pcu_lock);
10397  	sc->sc_flags &= ~SC_OP_RXFLUSH;
10398  	spin_lock_init(&sc->rx.rxbuflock);
10399  
10400 @@ -496,9 +494,10 @@ int ath_startrecv(struct ath_softc *sc)
10401  	ath9k_hw_rxena(ah);
10402  
10403  start_recv:
10404 -	spin_unlock_bh(&sc->rx.rxbuflock);
10405  	ath_opmode_init(sc);
10406 -	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
10407 +	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
10408 +
10409 +	spin_unlock_bh(&sc->rx.rxbuflock);
10410  
10411  	return 0;
10412  }
10413 @@ -508,7 +507,8 @@ bool ath_stoprecv(struct ath_softc *sc)
10414  	struct ath_hw *ah = sc->sc_ah;
10415  	bool stopped;
10416  
10417 -	ath9k_hw_stoppcurecv(ah);
10418 +	spin_lock_bh(&sc->rx.rxbuflock);
10419 +	ath9k_hw_abortpcurecv(ah);
10420  	ath9k_hw_setrxfilter(ah, 0);
10421  	stopped = ath9k_hw_stopdmarecv(ah);
10422  
10423 @@ -516,19 +516,18 @@ bool ath_stoprecv(struct ath_softc *sc)
10424  		ath_edma_stop_recv(sc);
10425  	else
10426  		sc->rx.rxlink = NULL;
10427 +	spin_unlock_bh(&sc->rx.rxbuflock);
10428  
10429  	return stopped;
10430  }
10431  
10432  void ath_flushrecv(struct ath_softc *sc)
10433  {
10434 -	spin_lock_bh(&sc->rx.rxflushlock);
10435  	sc->sc_flags |= SC_OP_RXFLUSH;
10436  	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
10437  		ath_rx_tasklet(sc, 1, true);
10438  	ath_rx_tasklet(sc, 1, false);
10439  	sc->sc_flags &= ~SC_OP_RXFLUSH;
10440 -	spin_unlock_bh(&sc->rx.rxflushlock);
10441  }
10442  
10443  static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
10444 @@ -631,7 +630,7 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
10445  		 * No more broadcast/multicast frames to be received at this
10446  		 * point.
10447  		 */
10448 -		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
10449 +		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
10450  		ath_print(common, ATH_DBG_PS,
10451  			  "All PS CAB frames received, back to sleep\n");
10452  	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
10453 @@ -870,15 +869,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
10454  		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
10455  			*decrypt_error = true;
10456  		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
10457 -			if (ieee80211_is_ctl(fc))
10458 -				/*
10459 -				 * Sometimes, we get invalid
10460 -				 * MIC failures on valid control frames.
10461 -				 * Remove these mic errors.
10462 -				 */
10463 -				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
10464 -			else
10465 +			/*
10466 +			 * The MIC error bit is only valid if the frame
10467 +			 * is not a control frame or fragment, and it was
10468 +			 * decrypted using a valid TKIP key.
10469 +			 */
10470 +			if (!ieee80211_is_ctl(fc) &&
10471 +			    !ieee80211_has_morefrags(fc) &&
10472 +			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
10473 +			    test_bit(rx_stats->rs_keyix, common->tkip_keymap))
10474  				rxs->flag |= RX_FLAG_MMIC_ERROR;
10475 +			else
10476 +				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
10477  		}
10478  		/*
10479  		 * Reject error frames with the exception of
10480 @@ -1033,9 +1035,11 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
10481  	int hdrlen, padpos, padsize;
10482  	u8 keyix;
10483  	__le16 fc;
10484 +	bool is_mc;
10485  
10486  	/* see if any padding is done by the hw and remove it */
10487  	hdr = (struct ieee80211_hdr *) skb->data;
10488 +	is_mc = !!is_multicast_ether_addr(hdr->addr1);
10489  	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
10490  	fc = hdr->frame_control;
10491  	padpos = ath9k_cmn_padpos(hdr->frame_control);
10492 @@ -1056,7 +1060,7 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
10493  
10494  	keyix = rx_stats->rs_keyix;
10495  
10496 -	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
10497 +	if ((is_mc || !(keyix == ATH9K_RXKEYIX_INVALID)) && !decrypt_error &&
10498  	    ieee80211_has_protected(fc)) {
10499  		rxs->flag |= RX_FLAG_DECRYPTED;
10500  	} else if (ieee80211_has_protected(fc)
10501 @@ -1096,6 +1100,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
10502  	u8 rx_status_len = ah->caps.rx_status_len;
10503  	u64 tsf = 0;
10504  	u32 tsf_lower = 0;
10505 +	unsigned long flags;
10506  
10507  	if (edma)
10508  		dma_type = DMA_BIDIRECTIONAL;
10509 @@ -1204,11 +1209,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
10510  			sc->rx.rxotherant = 0;
10511  		}
10512  
10513 +		spin_lock_irqsave(&sc->sc_pm_lock, flags);
10514  		if (unlikely(ath9k_check_auto_sleep(sc) ||
10515  			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
10516  					      PS_WAIT_FOR_CAB |
10517  					      PS_WAIT_FOR_PSPOLL_DATA))))
10518  			ath_rx_ps(sc, skb);
10519 +		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
10520  
10521  		ath_rx_send_to_mac80211(hw, sc, skb, rxs);
10522  
10523 diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
10524 index d01c4ad..86b0a4b 100644
10525 --- a/drivers/net/wireless/ath/ath9k/reg.h
10526 +++ b/drivers/net/wireless/ath/ath9k/reg.h
10527 @@ -709,6 +709,7 @@
10528  #define AR_WA_RESET_EN                  (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
10529  #define AR_WA_ANALOG_SHIFT              (1 << 20)
10530  #define AR_WA_POR_SHORT                 (1 << 21) /* PCI-E Phy reset control */
10531 +#define AR_WA_BIT22			(1 << 22)
10532  #define AR9285_WA_DEFAULT		0x004a050b
10533  #define AR9280_WA_DEFAULT           	0x0040073b
10534  #define AR_WA_DEFAULT               	0x0000073f
10535 @@ -900,7 +901,13 @@
10536  #define AR_DEVID_7010(_ah) \
10537  	(((_ah)->hw_version.devid == 0x7010) || \
10538  	 ((_ah)->hw_version.devid == 0x7015) || \
10539 -	 ((_ah)->hw_version.devid == 0x9018))
10540 +	 ((_ah)->hw_version.devid == 0x9018) || \
10541 +	 ((_ah)->hw_version.devid == 0xA704) || \
10542 +	 ((_ah)->hw_version.devid == 0x1200))
10543 +
10544 +#define AR9287_HTC_DEVID(_ah) \
10545 +	(((_ah)->hw_version.devid == 0x7015) || \
10546 +	 ((_ah)->hw_version.devid == 0x1200))
10547  
10548  #define AR_RADIO_SREV_MAJOR                   0xf0
10549  #define AR_RAD5133_SREV_MAJOR                 0xc0
10550 @@ -1012,11 +1019,13 @@ enum {
10551  #define AR9287_GPIO_IN_VAL_S                     11
10552  #define AR9271_GPIO_IN_VAL                       0xFFFF0000
10553  #define AR9271_GPIO_IN_VAL_S                     16
10554 -#define AR9300_GPIO_IN_VAL                       0x0001FFFF
10555 -#define AR9300_GPIO_IN_VAL_S                     0
10556  #define AR7010_GPIO_IN_VAL                       0x0000FFFF
10557  #define AR7010_GPIO_IN_VAL_S                     0
10558  
10559 +#define AR_GPIO_IN				 0x404c
10560 +#define AR9300_GPIO_IN_VAL                       0x0001FFFF
10561 +#define AR9300_GPIO_IN_VAL_S                     0
10562 +
10563  #define AR_GPIO_OE_OUT                           (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
10564  #define AR_GPIO_OE_OUT_DRV                       0x3
10565  #define AR_GPIO_OE_OUT_DRV_NO                    0x0
10566 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
10567 index 4dda14e..8f00c6c 100644
10568 --- a/drivers/net/wireless/ath/ath9k/xmit.c
10569 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
10570 @@ -61,6 +61,8 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
10571  			      struct ath_tx_status *ts, int txok);
10572  static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
10573  			     int nbad, int txok, bool update_rc);
10574 +static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
10575 +			      int seqno);
10576  
10577  enum {
10578  	MCS_HT20,
10579 @@ -143,18 +145,23 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
10580  	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
10581  	struct ath_buf *bf;
10582  	struct list_head bf_head;
10583 -	INIT_LIST_HEAD(&bf_head);
10584 +	struct ath_tx_status ts;
10585  
10586 -	WARN_ON(!tid->paused);
10587 +	INIT_LIST_HEAD(&bf_head);
10588  
10589 +	memset(&ts, 0, sizeof(ts));
10590  	spin_lock_bh(&txq->axq_lock);
10591 -	tid->paused = false;
10592  
10593  	while (!list_empty(&tid->buf_q)) {
10594  		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
10595 -		BUG_ON(bf_isretried(bf));
10596  		list_move_tail(&bf->list, &bf_head);
10597 -		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
10598 +
10599 +		if (bf_isretried(bf)) {
10600 +			ath_tx_update_baw(sc, tid, bf->bf_seqno);
10601 +			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
10602 +		} else {
10603 +			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
10604 +		}
10605  	}
10606  
10607  	spin_unlock_bh(&txq->axq_lock);
10608 @@ -312,6 +319,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
10609  	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
10610  	bool rc_update = true;
10611  	struct ieee80211_tx_rate rates[4];
10612 +	int nframes;
10613  
10614  	skb = bf->bf_mpdu;
10615  	hdr = (struct ieee80211_hdr *)skb->data;
10616 @@ -320,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
10617  	hw = bf->aphy->hw;
10618  
10619  	memcpy(rates, tx_info->control.rates, sizeof(rates));
10620 +	nframes = bf->bf_nframes;
10621  
10622  	rcu_read_lock();
10623  
10624 @@ -337,7 +346,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
10625  			    !bf->bf_stale || bf_next != NULL)
10626  				list_move_tail(&bf->list, &bf_head);
10627  
10628 -			ath_tx_rc_status(bf, ts, 0, 0, false);
10629 +			ath_tx_rc_status(bf, ts, 1, 0, false);
10630  			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
10631  				0, 0);
10632  
10633 @@ -431,7 +440,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
10634  			list_move_tail(&bf->list, &bf_head);
10635  		}
10636  
10637 -		if (!txpending) {
10638 +		if (!txpending || (tid->state & AGGR_CLEANUP)) {
10639  			/*
10640  			 * complete the acked-ones/xretried ones; update
10641  			 * block-ack window
10642 @@ -442,6 +451,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
10643  
10644  			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
10645  				memcpy(tx_info->control.rates, rates, sizeof(rates));
10646 +				bf->bf_nframes = nframes;
10647  				ath_tx_rc_status(bf, ts, nbad, txok, true);
10648  				rc_update = false;
10649  			} else {
10650 @@ -510,15 +520,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
10651  	}
10652  
10653  	if (tid->state & AGGR_CLEANUP) {
10654 +		ath_tx_flush_tid(sc, tid);
10655 +
10656  		if (tid->baw_head == tid->baw_tail) {
10657  			tid->state &= ~AGGR_ADDBA_COMPLETE;
10658  			tid->state &= ~AGGR_CLEANUP;
10659 -
10660 -			/* send buffered frames as singles */
10661 -			ath_tx_flush_tid(sc, tid);
10662  		}
10663 -		rcu_read_unlock();
10664 -		return;
10665  	}
10666  
10667  	rcu_read_unlock();
10668 @@ -785,17 +792,23 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
10669  		 status != ATH_AGGR_BAW_CLOSED);
10670  }
10671  
10672 -void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
10673 -		       u16 tid, u16 *ssn)
10674 +int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
10675 +		      u16 tid, u16 *ssn)
10676  {
10677  	struct ath_atx_tid *txtid;
10678  	struct ath_node *an;
10679  
10680  	an = (struct ath_node *)sta->drv_priv;
10681  	txtid = ATH_AN_2_TID(an, tid);
10682 +
10683 +	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
10684 +		return -EAGAIN;
10685 +
10686  	txtid->state |= AGGR_ADDBA_PROGRESS;
10687  	txtid->paused = true;
10688  	*ssn = txtid->seq_start;
10689 +
10690 +	return 0;
10691  }
10692  
10693  void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
10694 @@ -803,12 +816,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
10695  	struct ath_node *an = (struct ath_node *)sta->drv_priv;
10696  	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
10697  	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
10698 -	struct ath_tx_status ts;
10699 -	struct ath_buf *bf;
10700 -	struct list_head bf_head;
10701 -
10702 -	memset(&ts, 0, sizeof(ts));
10703 -	INIT_LIST_HEAD(&bf_head);
10704  
10705  	if (txtid->state & AGGR_CLEANUP)
10706  		return;
10707 @@ -818,31 +825,22 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
10708  		return;
10709  	}
10710  
10711 -	/* drop all software retried frames and mark this TID */
10712  	spin_lock_bh(&txq->axq_lock);
10713  	txtid->paused = true;
10714 -	while (!list_empty(&txtid->buf_q)) {
10715 -		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
10716 -		if (!bf_isretried(bf)) {
10717 -			/*
10718 -			 * NB: it's based on the assumption that
10719 -			 * software retried frame will always stay
10720 -			 * at the head of software queue.
10721 -			 */
10722 -			break;
10723 -		}
10724 -		list_move_tail(&bf->list, &bf_head);
10725 -		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
10726 -		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
10727 -	}
10728 -	spin_unlock_bh(&txq->axq_lock);
10729  
10730 -	if (txtid->baw_head != txtid->baw_tail) {
10731 +	/*
10732 +	 * If frames are still being transmitted for this TID, they will be
10733 +	 * cleaned up during tx completion. To prevent race conditions, this
10734 +	 * TID can only be reused after all in-progress subframes have been
10735 +	 * completed.
10736 +	 */
10737 +	if (txtid->baw_head != txtid->baw_tail)
10738  		txtid->state |= AGGR_CLEANUP;
10739 -	} else {
10740 +	else
10741  		txtid->state &= ~AGGR_ADDBA_COMPLETE;
10742 -		ath_tx_flush_tid(sc, txtid);
10743 -	}
10744 +	spin_unlock_bh(&txq->axq_lock);
10745 +
10746 +	ath_tx_flush_tid(sc, txtid);
10747  }
10748  
10749  void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
10750 @@ -1103,15 +1101,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
10751  	txq->axq_tx_inprogress = false;
10752  	spin_unlock_bh(&txq->axq_lock);
10753  
10754 -	/* flush any pending frames if aggregation is enabled */
10755 -	if (sc->sc_flags & SC_OP_TXAGGR) {
10756 -		if (!retry_tx) {
10757 -			spin_lock_bh(&txq->axq_lock);
10758 -			ath_txq_drain_pending_buffers(sc, txq);
10759 -			spin_unlock_bh(&txq->axq_lock);
10760 -		}
10761 -	}
10762 -
10763  	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
10764  		spin_lock_bh(&txq->axq_lock);
10765  		while (!list_empty(&txq->txq_fifo_pending)) {
10766 @@ -1132,6 +1121,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
10767  		}
10768  		spin_unlock_bh(&txq->axq_lock);
10769  	}
10770 +
10771 +	/* flush any pending frames if aggregation is enabled */
10772 +	if (sc->sc_flags & SC_OP_TXAGGR) {
10773 +		if (!retry_tx) {
10774 +			spin_lock_bh(&txq->axq_lock);
10775 +			ath_txq_drain_pending_buffers(sc, txq);
10776 +			spin_unlock_bh(&txq->axq_lock);
10777 +		}
10778 +	}
10779  }
10780  
10781  void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
10782 @@ -1162,13 +1160,13 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
10783  		ath_print(common, ATH_DBG_FATAL,
10784  			  "Failed to stop TX DMA. Resetting hardware!\n");
10785  
10786 -		spin_lock_bh(&sc->sc_resetlock);
10787 +		spin_lock_bh(&sc->sc_pcu_lock);
10788  		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
10789  		if (r)
10790  			ath_print(common, ATH_DBG_FATAL,
10791  				  "Unable to reset hardware; reset status %d\n",
10792  				  r);
10793 -		spin_unlock_bh(&sc->sc_resetlock);
10794 +		spin_unlock_bh(&sc->sc_pcu_lock);
10795  	}
10796  
10797  	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
10798 @@ -2024,9 +2022,15 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
10799  
10800  	if (ts->ts_status & ATH9K_TXERR_FILT)
10801  		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
10802 -	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
10803 +	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
10804  		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
10805  
10806 +		BUG_ON(nbad > bf->bf_nframes);
10807 +
10808 +		tx_info->status.ampdu_len = bf->bf_nframes;
10809 +		tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
10810 +	}
10811 +
10812  	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
10813  	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
10814  		if (ieee80211_is_data(hdr->frame_control)) {
10815 @@ -2036,8 +2040,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
10816  			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
10817  			    (ts->ts_status & ATH9K_TXERR_FIFO))
10818  				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
10819 -			tx_info->status.ampdu_len = bf->bf_nframes;
10820 -			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
10821  		}
10822  	}
10823  
10824 @@ -2159,7 +2161,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
10825  			 */
10826  			if (ts.ts_status & ATH9K_TXERR_XRETRY)
10827  				bf->bf_state.bf_type |= BUF_XRETRY;
10828 -			ath_tx_rc_status(bf, &ts, 0, txok, true);
10829 +			ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
10830  		}
10831  
10832  		if (bf_isampdu(bf))
10833 @@ -2204,7 +2206,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
10834  		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
10835  			  "tx hung, resetting the chip\n");
10836  		ath9k_ps_wakeup(sc);
10837 -		ath_reset(sc, false);
10838 +		ath_reset(sc, true);
10839  		ath9k_ps_restore(sc);
10840  	}
10841  
10842 @@ -2288,7 +2290,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
10843  		if (!bf_isampdu(bf)) {
10844  			if (txs.ts_status & ATH9K_TXERR_XRETRY)
10845  				bf->bf_state.bf_type |= BUF_XRETRY;
10846 -			ath_tx_rc_status(bf, &txs, 0, txok, true);
10847 +			ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
10848  		}
10849  
10850  		if (bf_isampdu(bf))
10851 diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
10852 index 45933cf..09e2dfd 100644
10853 --- a/drivers/net/wireless/b43/sdio.c
10854 +++ b/drivers/net/wireless/b43/sdio.c
10855 @@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func,
10856  err_free_ssb:
10857  	kfree(sdio);
10858  err_disable_func:
10859 +	sdio_claim_host(func);
10860  	sdio_disable_func(func);
10861  err_release_host:
10862  	sdio_release_host(func);
10863 @@ -175,7 +176,9 @@ static void b43_sdio_remove(struct sdio_func *func)
10864  	struct b43_sdio *sdio = sdio_get_drvdata(func);
10865  
10866  	ssb_bus_unregister(&sdio->ssb);
10867 +	sdio_claim_host(func);
10868  	sdio_disable_func(func);
10869 +	sdio_release_host(func);
10870  	kfree(sdio);
10871  	sdio_set_drvdata(func, NULL);
10872  }
10873 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
10874 index 10d7b9b..f735117 100644
10875 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
10876 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
10877 @@ -1234,6 +1234,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
10878  	/* only Re-enable if diabled by irq */
10879  	if (test_bit(STATUS_INT_ENABLED, &priv->status))
10880  		iwl_enable_interrupts(priv);
10881 +	/* Re-enable RF_KILL if it occurred */
10882 +	else if (handled & CSR_INT_BIT_RF_KILL)
10883 +		iwl_enable_rfkill_int(priv);
10884  
10885  #ifdef CONFIG_IWLWIFI_DEBUG
10886  	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
10887 @@ -1449,6 +1452,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
10888  	/* only Re-enable if diabled by irq */
10889  	if (test_bit(STATUS_INT_ENABLED, &priv->status))
10890  		iwl_enable_interrupts(priv);
10891 +	/* Re-enable RF_KILL if it occurred */
10892 +	else if (handled & CSR_INT_BIT_RF_KILL)
10893 +		iwl_enable_rfkill_int(priv);
10894  }
10895  
10896  /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
10897 @@ -3260,9 +3266,10 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
10898  
10899  	flush_workqueue(priv->workqueue);
10900  
10901 -	/* enable interrupts again in order to receive rfkill changes */
10902 +	/* User space software may expect getting rfkill changes
10903 +	 * even if interface is down */
10904  	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
10905 -	iwl_enable_interrupts(priv);
10906 +	iwl_enable_rfkill_int(priv);
10907  
10908  	IWL_DEBUG_MAC80211(priv, "leave\n");
10909  }
10910 @@ -4103,14 +4110,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10911  	 * 8. Enable interrupts and read RFKILL state
10912  	 *********************************************/
10913  
10914 -	/* enable interrupts if needed: hw bug w/a */
10915 +	/* enable rfkill interrupt: hw bug w/a */
10916  	pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
10917  	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
10918  		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
10919  		pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
10920  	}
10921  
10922 -	iwl_enable_interrupts(priv);
10923 +	iwl_enable_rfkill_int(priv);
10924  
10925  	/* If platform's RF_KILL switch is NOT set to KILL */
10926  	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
10927 diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
10928 index 621abe3..1c6add9 100644
10929 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
10930 +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
10931 @@ -168,6 +168,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv)
10932  	IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
10933  }
10934  
10935 +static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
10936 +{
10937 +	IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
10938 +	iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
10939 +}
10940 +
10941  static inline void iwl_enable_interrupts(struct iwl_priv *priv)
10942  {
10943  	IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
10944 diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
10945 index e8e2d0f..f800ef4 100644
10946 --- a/drivers/net/wireless/orinoco/main.c
10947 +++ b/drivers/net/wireless/orinoco/main.c
10948 @@ -1813,6 +1813,12 @@ static int __orinoco_commit(struct orinoco_private *priv)
10949  	struct net_device *dev = priv->ndev;
10950  	int err = 0;
10951  
10952 +	/* If we've called commit, we are reconfiguring or bringing the
10953 +	 * interface up. Maintaining countermeasures across this would
10954 +	 * be confusing, so note that we've disabled them. The port will
10955 +	 * be enabled later in orinoco_commit or __orinoco_up. */
10956 +	priv->tkip_cm_active = 0;
10957 +
10958  	err = orinoco_hw_program_rids(priv);
10959  
10960  	/* FIXME: what about netif_tx_lock */
10961 diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
10962 index ef46a2d..083999f 100644
10963 --- a/drivers/net/wireless/orinoco/orinoco_cs.c
10964 +++ b/drivers/net/wireless/orinoco/orinoco_cs.c
10965 @@ -248,20 +248,20 @@ orinoco_cs_config(struct pcmcia_device *link)
10966  		goto failed;
10967  	}
10968  
10969 -	ret = pcmcia_request_irq(link, orinoco_interrupt);
10970 -	if (ret)
10971 -		goto failed;
10972 -
10973 -	/* We initialize the hermes structure before completing PCMCIA
10974 -	 * configuration just in case the interrupt handler gets
10975 -	 * called. */
10976  	mem = ioport_map(link->resource[0]->start,
10977  			resource_size(link->resource[0]));
10978  	if (!mem)
10979  		goto failed;
10980  
10981 +	/* We initialize the hermes structure before completing PCMCIA
10982 +	 * configuration just in case the interrupt handler gets
10983 +	 * called. */
10984  	hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
10985  
10986 +	ret = pcmcia_request_irq(link, orinoco_interrupt);
10987 +	if (ret)
10988 +		goto failed;
10989 +
10990  	/*
10991  	 * This actually configures the PCMCIA socket -- setting up
10992  	 * the I/O windows and the interrupt mapping, and putting the
10993 diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
10994 index 873877e..93070a3 100644
10995 --- a/drivers/net/wireless/orinoco/spectrum_cs.c
10996 +++ b/drivers/net/wireless/orinoco/spectrum_cs.c
10997 @@ -310,21 +310,21 @@ spectrum_cs_config(struct pcmcia_device *link)
10998  		goto failed;
10999  	}
11000  
11001 -	ret = pcmcia_request_irq(link, orinoco_interrupt);
11002 -	if (ret)
11003 -		goto failed;
11004 -
11005 -	/* We initialize the hermes structure before completing PCMCIA
11006 -	 * configuration just in case the interrupt handler gets
11007 -	 * called. */
11008  	mem = ioport_map(link->resource[0]->start,
11009  			resource_size(link->resource[0]));
11010  	if (!mem)
11011  		goto failed;
11012  
11013 +	/* We initialize the hermes structure before completing PCMCIA
11014 +	 * configuration just in case the interrupt handler gets
11015 +	 * called. */
11016  	hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
11017  	hw->eeprom_pda = true;
11018  
11019 +	ret = pcmcia_request_irq(link, orinoco_interrupt);
11020 +	if (ret)
11021 +		goto failed;
11022 +
11023  	/*
11024  	 * This actually configures the PCMCIA socket -- setting up
11025  	 * the I/O windows and the interrupt mapping, and putting the
11026 diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
11027 index cf7be1e..56aab61 100644
11028 --- a/drivers/net/wireless/orinoco/wext.c
11029 +++ b/drivers/net/wireless/orinoco/wext.c
11030 @@ -904,10 +904,10 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
11031  		 */
11032  		if (param->value) {
11033  			priv->tkip_cm_active = 1;
11034 -			ret = hermes_enable_port(hw, 0);
11035 +			ret = hermes_disable_port(hw, 0);
11036  		} else {
11037  			priv->tkip_cm_active = 0;
11038 -			ret = hermes_disable_port(hw, 0);
11039 +			ret = hermes_enable_port(hw, 0);
11040  		}
11041  		break;
11042  
11043 diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
11044 index 78347041..0a7ce37 100644
11045 --- a/drivers/net/wireless/p54/eeprom.c
11046 +++ b/drivers/net/wireless/p54/eeprom.c
11047 @@ -260,8 +260,10 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
11048  	list->max_entries = max_channel_num;
11049  	list->channels = kzalloc(sizeof(struct p54_channel_entry) *
11050  				 max_channel_num, GFP_KERNEL);
11051 -	if (!list->channels)
11052 +	if (!list->channels) {
11053 +		ret = -ENOMEM;
11054  		goto free;
11055 +	}
11056  
11057  	for (i = 0; i < max_channel_num; i++) {
11058  		if (i < priv->iq_autocal_len) {
11059 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
11060 index ad59595..2325e56 100644
11061 --- a/drivers/net/wireless/p54/p54usb.c
11062 +++ b/drivers/net/wireless/p54/p54usb.c
11063 @@ -33,8 +33,18 @@ MODULE_ALIAS("prism54usb");
11064  MODULE_FIRMWARE("isl3886usb");
11065  MODULE_FIRMWARE("isl3887usb");
11066  
11067 +/*
11068 + * Note:
11069 + *
11070 + * Always update our wiki's device list (located at:
11071 + * http://wireless.kernel.org/en/users/Drivers/p54/devices ),
11072 + * whenever you add a new device.
11073 + */
11074 +
11075  static struct usb_device_id p54u_table[] __devinitdata = {
11076  	/* Version 1 devices (pci chip + net2280) */
11077 +	{USB_DEVICE(0x0411, 0x0050)},	/* Buffalo WLI2-USB2-G54 */
11078 +	{USB_DEVICE(0x045e, 0x00c2)},	/* Microsoft MN-710 */
11079  	{USB_DEVICE(0x0506, 0x0a11)},	/* 3COM 3CRWE254G72 */
11080  	{USB_DEVICE(0x06b9, 0x0120)},	/* Thomson SpeedTouch 120g */
11081  	{USB_DEVICE(0x0707, 0xee06)},	/* SMC 2862W-G */
11082 @@ -47,7 +57,13 @@ static struct usb_device_id p54u_table[] __devinitdata = {
11083  	{USB_DEVICE(0x0846, 0x4220)},	/* Netgear WG111 */
11084  	{USB_DEVICE(0x09aa, 0x1000)},	/* Spinnaker Proto board */
11085  	{USB_DEVICE(0x0cde, 0x0006)},	/* Medion 40900, Roper Europe */
11086 +	{USB_DEVICE(0x0db0, 0x6826)},	/* MSI UB54G (MS-6826) */
11087 +	{USB_DEVICE(0x107b, 0x55f2)},	/* Gateway WGU-210 (Gemtek) */
11088  	{USB_DEVICE(0x124a, 0x4023)},	/* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
11089 +	{USB_DEVICE(0x1435, 0x0210)},	/* Inventel UR054G */
11090 +	{USB_DEVICE(0x15a9, 0x0002)},	/* Gemtek WUBI-100GW 802.11g */
11091 +	{USB_DEVICE(0x1630, 0x0005)},	/* 2Wire 802.11g USB (v1) / Z-Com */
11092 +	{USB_DEVICE(0x182d, 0x096b)},	/* Sitecom WL-107 */
11093  	{USB_DEVICE(0x1915, 0x2234)},	/* Linksys WUSB54G OEM */
11094  	{USB_DEVICE(0x1915, 0x2235)},	/* Linksys WUSB54G Portable OEM */
11095  	{USB_DEVICE(0x2001, 0x3701)},	/* DLink DWL-G120 Spinnaker */
11096 @@ -60,6 +76,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
11097  	{USB_DEVICE(0x050d, 0x7050)},	/* Belkin F5D7050 ver 1000 */
11098  	{USB_DEVICE(0x0572, 0x2000)},	/* Cohiba Proto board */
11099  	{USB_DEVICE(0x0572, 0x2002)},	/* Cohiba Proto board */
11100 +	{USB_DEVICE(0x06a9, 0x000e)},	/* Westell 802.11g USB (A90-211WG-01) */
11101  	{USB_DEVICE(0x06b9, 0x0121)},	/* Thomson SpeedTouch 121g */
11102  	{USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
11103  	{USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
11104 @@ -80,7 +97,9 @@ static struct usb_device_id p54u_table[] __devinitdata = {
11105  	{USB_DEVICE(0x13B1, 0x000C)},	/* Linksys WUSB54AG */
11106  	{USB_DEVICE(0x1413, 0x5400)},   /* Telsey 802.11g USB2.0 Adapter */
11107  	{USB_DEVICE(0x1435, 0x0427)},	/* Inventel UR054G */
11108 +	{USB_DEVICE(0x1668, 0x1050)},	/* Actiontec 802UIG-1 */
11109  	{USB_DEVICE(0x2001, 0x3704)},	/* DLink DWL-G122 rev A2 */
11110 +	{USB_DEVICE(0x2001, 0x3705)},	/* D-Link DWL-G120 rev C1 */
11111  	{USB_DEVICE(0x413c, 0x5513)},	/* Dell WLA3310 USB Wireless Adapter */
11112  	{USB_DEVICE(0x413c, 0x8102)},	/* Spinnaker DUT */
11113  	{USB_DEVICE(0x413c, 0x8104)},	/* Cohiba Proto board */
11114 @@ -930,8 +949,8 @@ static int __devinit p54u_probe(struct usb_interface *intf,
11115  #ifdef CONFIG_PM
11116  		/* ISL3887 needs a full reset on resume */
11117  		udev->reset_resume = 1;
11118 +#endif /* CONFIG_PM */
11119  		err = p54u_device_reset(dev);
11120 -#endif
11121  
11122  		priv->hw_type = P54U_3887;
11123  		dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
11124 diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
11125 index 0e937dc..3d53113 100644
11126 --- a/drivers/net/wireless/p54/txrx.c
11127 +++ b/drivers/net/wireless/p54/txrx.c
11128 @@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
11129  	else
11130  		*burst_possible = false;
11131  
11132 -	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
11133 +	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
11134  		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
11135  
11136  	if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
11137 diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
11138 index 5063e01..6a6cd71 100644
11139 --- a/drivers/net/wireless/rt2x00/rt2400pci.c
11140 +++ b/drivers/net/wireless/rt2x00/rt2400pci.c
11141 @@ -1488,8 +1488,10 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11142  	spec->channels_info = info;
11143  
11144  	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
11145 -	for (i = 0; i < 14; i++)
11146 -		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11147 +	for (i = 0; i < 14; i++) {
11148 +		info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER);
11149 +		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11150 +	}
11151  
11152  	return 0;
11153  }
11154 diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
11155 index c2a555d..ec3e8b3 100644
11156 --- a/drivers/net/wireless/rt2x00/rt2500pci.c
11157 +++ b/drivers/net/wireless/rt2x00/rt2500pci.c
11158 @@ -1802,12 +1802,16 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11159  	spec->channels_info = info;
11160  
11161  	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
11162 -	for (i = 0; i < 14; i++)
11163 -		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11164 +	for (i = 0; i < 14; i++) {
11165 +		info[i].max_power = MAX_TXPOWER;
11166 +		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11167 +	}
11168  
11169  	if (spec->num_channels > 14) {
11170 -		for (i = 14; i < spec->num_channels; i++)
11171 -			info[i].tx_power1 = DEFAULT_TXPOWER;
11172 +		for (i = 14; i < spec->num_channels; i++) {
11173 +			info[i].max_power = MAX_TXPOWER;
11174 +			info[i].default_power1 = DEFAULT_TXPOWER;
11175 +		}
11176  	}
11177  
11178  	return 0;
11179 diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
11180 index cdaf93f..ed4de3f 100644
11181 --- a/drivers/net/wireless/rt2x00/rt2500usb.c
11182 +++ b/drivers/net/wireless/rt2x00/rt2500usb.c
11183 @@ -1705,12 +1705,16 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11184  	spec->channels_info = info;
11185  
11186  	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
11187 -	for (i = 0; i < 14; i++)
11188 -		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11189 +	for (i = 0; i < 14; i++) {
11190 +		info[i].max_power = MAX_TXPOWER;
11191 +		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11192 +	}
11193  
11194  	if (spec->num_channels > 14) {
11195 -		for (i = 14; i < spec->num_channels; i++)
11196 -			info[i].tx_power1 = DEFAULT_TXPOWER;
11197 +		for (i = 14; i < spec->num_channels; i++) {
11198 +			info[i].max_power = MAX_TXPOWER;
11199 +			info[i].default_power1 = DEFAULT_TXPOWER;
11200 +		}
11201  	}
11202  
11203  	return 0;
11204 diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
11205 index ed4ebcd..616b71a 100644
11206 --- a/drivers/net/wireless/rt2x00/rt2800.h
11207 +++ b/drivers/net/wireless/rt2x00/rt2800.h
11208 @@ -1841,6 +1841,13 @@ struct mac_iveiv_entry {
11209  #define EEPROM_RSSI_A2_LNA_A2		FIELD16(0xff00)
11210  
11211  /*
11212 + * EEPROM Maximum TX power values
11213 + */
11214 +#define EEPROM_MAX_TX_POWER		0x0027
11215 +#define EEPROM_MAX_TX_POWER_24GHZ	FIELD16(0x00ff)
11216 +#define EEPROM_MAX_TX_POWER_5GHZ	FIELD16(0xff00)
11217 +
11218 +/*
11219   * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
11220   *	This is delta in 40MHZ.
11221   * VALUE: Tx Power dalta value (MAX=4)
11222 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
11223 index b66e0fd..60039d3 100644
11224 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
11225 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
11226 @@ -1120,27 +1120,27 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
11227  		 * double meaning, and we should set a 7DBm boost flag.
11228  		 */
11229  		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
11230 -				   (info->tx_power1 >= 0));
11231 +				   (info->default_power1 >= 0));
11232  
11233 -		if (info->tx_power1 < 0)
11234 -			info->tx_power1 += 7;
11235 +		if (info->default_power1 < 0)
11236 +			info->default_power1 += 7;
11237  
11238  		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
11239 -				   TXPOWER_A_TO_DEV(info->tx_power1));
11240 +				   TXPOWER_A_TO_DEV(info->default_power1));
11241  
11242  		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
11243 -				   (info->tx_power2 >= 0));
11244 +				   (info->default_power2 >= 0));
11245  
11246 -		if (info->tx_power2 < 0)
11247 -			info->tx_power2 += 7;
11248 +		if (info->default_power2 < 0)
11249 +			info->default_power2 += 7;
11250  
11251  		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
11252 -				   TXPOWER_A_TO_DEV(info->tx_power2));
11253 +				   TXPOWER_A_TO_DEV(info->default_power2));
11254  	} else {
11255  		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
11256 -				   TXPOWER_G_TO_DEV(info->tx_power1));
11257 +				   TXPOWER_G_TO_DEV(info->default_power1));
11258  		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
11259 -				   TXPOWER_G_TO_DEV(info->tx_power2));
11260 +				   TXPOWER_G_TO_DEV(info->default_power2));
11261  	}
11262  
11263  	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
11264 @@ -1180,13 +1180,11 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
11265  	rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
11266  
11267  	rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
11268 -	rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
11269 -			  TXPOWER_G_TO_DEV(info->tx_power1));
11270 +	rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
11271  	rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
11272  
11273  	rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
11274 -	rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
11275 -			  TXPOWER_G_TO_DEV(info->tx_power2));
11276 +	rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
11277  	rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
11278  
11279  	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
11280 @@ -2516,6 +2514,13 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
11281  				   default_lna_gain);
11282  	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
11283  
11284 +	rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
11285 +	if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
11286 +		rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
11287 +	if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
11288 +		rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
11289 +	rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
11290 +
11291  	return 0;
11292  }
11293  EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
11294 @@ -2755,9 +2760,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11295  {
11296  	struct hw_mode_spec *spec = &rt2x00dev->spec;
11297  	struct channel_info *info;
11298 -	char *tx_power1;
11299 -	char *tx_power2;
11300 +	char *default_power1;
11301 +	char *default_power2;
11302  	unsigned int i;
11303 +	unsigned short max_power;
11304  	u16 eeprom;
11305  
11306  	/*
11307 @@ -2871,21 +2877,26 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11308  
11309  	spec->channels_info = info;
11310  
11311 -	tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
11312 -	tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
11313 +	rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
11314 +	max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
11315 +	default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
11316 +	default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
11317  
11318  	for (i = 0; i < 14; i++) {
11319 -		info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
11320 -		info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
11321 +		info[i].max_power = max_power;
11322 +		info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
11323 +		info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
11324  	}
11325  
11326  	if (spec->num_channels > 14) {
11327 -		tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
11328 -		tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
11329 +		max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
11330 +		default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
11331 +		default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
11332  
11333  		for (i = 14; i < spec->num_channels; i++) {
11334 -			info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
11335 -			info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
11336 +			info[i].max_power = max_power;
11337 +			info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
11338 +			info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
11339  		}
11340  	}
11341  
11342 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
11343 index c21af38..6b2b92b 100644
11344 --- a/drivers/net/wireless/rt2x00/rt2x00.h
11345 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
11346 @@ -212,8 +212,9 @@ struct channel_info {
11347  	unsigned int flags;
11348  #define GEOGRAPHY_ALLOWED	0x00000001
11349  
11350 -	short tx_power1;
11351 -	short tx_power2;
11352 +	short max_power;
11353 +	short default_power1;
11354 +	short default_power2;
11355  };
11356  
11357  /*
11358 diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
11359 index 585e816..19f86ce 100644
11360 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
11361 +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
11362 @@ -710,7 +710,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
11363  	for (i = 0; i < spec->num_channels; i++) {
11364  		rt2x00lib_channel(&channels[i],
11365  				  spec->channels[i].channel,
11366 -				  spec->channels_info[i].tx_power1, i);
11367 +				  spec->channels_info[i].max_power, i);
11368  	}
11369  
11370  	/*
11371 diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
11372 index e539c6c..73d6382 100644
11373 --- a/drivers/net/wireless/rt2x00/rt61pci.c
11374 +++ b/drivers/net/wireless/rt2x00/rt61pci.c
11375 @@ -2661,13 +2661,17 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11376  	spec->channels_info = info;
11377  
11378  	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
11379 -	for (i = 0; i < 14; i++)
11380 -		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11381 +	for (i = 0; i < 14; i++) {
11382 +		info[i].max_power = MAX_TXPOWER;
11383 +		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11384 +	}
11385  
11386  	if (spec->num_channels > 14) {
11387  		tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
11388 -		for (i = 14; i < spec->num_channels; i++)
11389 -			info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11390 +		for (i = 14; i < spec->num_channels; i++) {
11391 +			info[i].max_power = MAX_TXPOWER;
11392 +			info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11393 +		}
11394  	}
11395  
11396  	return 0;
11397 diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
11398 index aa9de18..c457d65 100644
11399 --- a/drivers/net/wireless/rt2x00/rt73usb.c
11400 +++ b/drivers/net/wireless/rt2x00/rt73usb.c
11401 @@ -2091,13 +2091,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
11402  	spec->channels_info = info;
11403  
11404  	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
11405 -	for (i = 0; i < 14; i++)
11406 -		info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11407 +	for (i = 0; i < 14; i++) {
11408 +		info[i].max_power = MAX_TXPOWER;
11409 +		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11410 +	}
11411  
11412  	if (spec->num_channels > 14) {
11413  		tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
11414 -		for (i = 14; i < spec->num_channels; i++)
11415 -			info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11416 +		for (i = 14; i < spec->num_channels; i++) {
11417 +			info[i].max_power = MAX_TXPOWER;
11418 +			info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
11419 +		}
11420  	}
11421  
11422  	return 0;
11423 @@ -2391,6 +2395,7 @@ static struct usb_device_id rt73usb_device_table[] = {
11424  	{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
11425  	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
11426  	{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
11427 +	{ USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
11428  	/* Qcom */
11429  	{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
11430  	{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
11431 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
11432 index b50fedc..42dad59 100644
11433 --- a/drivers/net/xen-netfront.c
11434 +++ b/drivers/net/xen-netfront.c
11435 @@ -66,8 +66,8 @@ struct netfront_cb {
11436  
11437  #define GRANT_INVALID_REF	0
11438  
11439 -#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
11440 -#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
11441 +#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
11442 +#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
11443  #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
11444  
11445  struct netfront_info {
11446 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
11447 index dc0ae4d..0107251 100644
11448 --- a/drivers/oprofile/timer_int.c
11449 +++ b/drivers/oprofile/timer_int.c
11450 @@ -21,6 +21,7 @@
11451  #include "oprof.h"
11452  
11453  static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
11454 +static int ctr_running;
11455  
11456  static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
11457  {
11458 @@ -33,6 +34,9 @@ static void __oprofile_hrtimer_start(void *unused)
11459  {
11460  	struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
11461  
11462 +	if (!ctr_running)
11463 +		return;
11464 +
11465  	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
11466  	hrtimer->function = oprofile_hrtimer_notify;
11467  
11468 @@ -42,7 +46,10 @@ static void __oprofile_hrtimer_start(void *unused)
11469  
11470  static int oprofile_hrtimer_start(void)
11471  {
11472 +	get_online_cpus();
11473 +	ctr_running = 1;
11474  	on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
11475 +	put_online_cpus();
11476  	return 0;
11477  }
11478  
11479 @@ -50,6 +57,9 @@ static void __oprofile_hrtimer_stop(int cpu)
11480  {
11481  	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
11482  
11483 +	if (!ctr_running)
11484 +		return;
11485 +
11486  	hrtimer_cancel(hrtimer);
11487  }
11488  
11489 @@ -57,8 +67,11 @@ static void oprofile_hrtimer_stop(void)
11490  {
11491  	int cpu;
11492  
11493 +	get_online_cpus();
11494  	for_each_online_cpu(cpu)
11495  		__oprofile_hrtimer_stop(cpu);
11496 +	ctr_running = 0;
11497 +	put_online_cpus();
11498  }
11499  
11500  static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
11501 diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
11502 index 0a19708..a286959 100644
11503 --- a/drivers/pci/dmar.c
11504 +++ b/drivers/pci/dmar.c
11505 @@ -1414,6 +1414,11 @@ int __init enable_drhd_fault_handling(void)
11506  			       (unsigned long long)drhd->reg_base_addr, ret);
11507  			return -1;
11508  		}
11509 +
11510 +		/*
11511 +		 * Clear any previous faults.
11512 +		 */
11513 +		dmar_fault(iommu->irq, iommu);
11514  	}
11515  
11516  	return 0;
11517 diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
11518 index f7b68ca..4ae494b 100644
11519 --- a/drivers/pci/pci-stub.c
11520 +++ b/drivers/pci/pci-stub.c
11521 @@ -54,6 +54,9 @@ static int __init pci_stub_init(void)
11522  			subdevice = PCI_ANY_ID, class=0, class_mask=0;
11523  		int fields;
11524  
11525 +		if (!strlen(id))
11526 +			continue;
11527 +
11528  		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
11529  				&vendor, &device, &subvendor, &subdevice,
11530  				&class, &class_mask);
11531 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
11532 index b5a7d9b..4835a02 100644
11533 --- a/drivers/pci/pci-sysfs.c
11534 +++ b/drivers/pci/pci-sysfs.c
11535 @@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b)
11536  
11537  #ifdef HAVE_PCI_MMAP
11538  
11539 -int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
11540 +int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
11541 +		  enum pci_mmap_api mmap_api)
11542  {
11543 -	unsigned long nr, start, size;
11544 +	unsigned long nr, start, size, pci_start;
11545  
11546 +	if (pci_resource_len(pdev, resno) == 0)
11547 +		return 0;
11548  	nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
11549  	start = vma->vm_pgoff;
11550  	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
11551 -	if (start < size && size - start >= nr)
11552 +	pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
11553 +			pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
11554 +	if (start >= pci_start && start < pci_start + size &&
11555 +			start + nr <= pci_start + size)
11556  		return 1;
11557 -	WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
11558 -		current->comm, start, start+nr, pci_name(pdev), resno, size);
11559  	return 0;
11560  }
11561  
11562 @@ -745,8 +749,14 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
11563  	if (i >= PCI_ROM_RESOURCE)
11564  		return -ENODEV;
11565  
11566 -	if (!pci_mmap_fits(pdev, i, vma))
11567 +	if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
11568 +		WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
11569 +			"at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
11570 +			current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
11571 +			pci_name(pdev), i,
11572 +			pci_resource_start(pdev, i), pci_resource_len(pdev, i));
11573  		return -EINVAL;
11574 +	}
11575  
11576  	/* pci_mmap_page_range() expects the same kind of entry as coming
11577  	 * from /proc/bus/pci/ which is a "user visible" value. If this is
11578 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
11579 index 6beb11b..1001b1d 100644
11580 --- a/drivers/pci/pci.h
11581 +++ b/drivers/pci/pci.h
11582 @@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
11583  #endif
11584  extern void pci_cleanup_rom(struct pci_dev *dev);
11585  #ifdef HAVE_PCI_MMAP
11586 +enum pci_mmap_api {
11587 +	PCI_MMAP_SYSFS,	/* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
11588 +	PCI_MMAP_PROCFS	/* mmap on /proc/bus/pci/<BDF> */
11589 +};
11590  extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
11591 -			 struct vm_area_struct *vma);
11592 +			 struct vm_area_struct *vmai,
11593 +			 enum pci_mmap_api mmap_api);
11594  #endif
11595  int pci_probe_reset_function(struct pci_dev *dev);
11596  
11597 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
11598 index 01f0306..4aae016 100644
11599 --- a/drivers/pci/proc.c
11600 +++ b/drivers/pci/proc.c
11601 @@ -260,7 +260,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
11602  
11603  	/* Make sure the caller is mapping a real resource for this device */
11604  	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
11605 -		if (pci_mmap_fits(dev, i, vma))
11606 +		if (pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
11607  			break;
11608  	}
11609  
11610 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
11611 index 857ae01..32ae818 100644
11612 --- a/drivers/pci/quirks.c
11613 +++ b/drivers/pci/quirks.c
11614 @@ -2714,6 +2714,29 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_m
11615  DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
11616  #endif /*CONFIG_MMC_RICOH_MMC*/
11617  
11618 +#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
11619 +#define VTUNCERRMSK_REG	0x1ac
11620 +#define VTD_MSK_SPEC_ERRORS	(1 << 31)
11621 +/*
11622 + * This is a quirk for masking vt-d spec defined errors to platform error
11623 + * handling logic. Without this, platforms using Intel 7500, 5500 chipsets
11624 + * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
11625 + * on the RAS config settings of the platform) when a vt-d fault happens.
11626 + * The resulting SMI caused the system to hang.
11627 + *
11628 + * VT-d spec related errors are already handled by the VT-d OS code, so no
11629 + * need to report the same error through other channels.
11630 + */
11631 +static void vtd_mask_spec_errors(struct pci_dev *dev)
11632 +{
11633 +	u32 word;
11634 +
11635 +	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
11636 +	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
11637 +}
11638 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
11639 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
11640 +#endif
11641  
11642  static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
11643  			  struct pci_fixup *end)
11644 diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
11645 index 6f1a86b..fd4c25a 100644
11646 --- a/drivers/pcmcia/soc_common.c
11647 +++ b/drivers/pcmcia/soc_common.c
11648 @@ -65,6 +65,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func,
11649  		va_end(args);
11650  	}
11651  }
11652 +EXPORT_SYMBOL(soc_pcmcia_debug);
11653  
11654  #endif
11655  
11656 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
11657 index b756e07..e8acb1c 100644
11658 --- a/drivers/platform/x86/asus-laptop.c
11659 +++ b/drivers/platform/x86/asus-laptop.c
11660 @@ -1065,9 +1065,9 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
11661   */
11662  static int asus_gps_rfkill_set(void *data, bool blocked)
11663  {
11664 -	acpi_handle handle = data;
11665 +	struct asus_laptop *asus = data;
11666  
11667 -	return asus_gps_switch(handle, !blocked);
11668 +	return asus_gps_switch(asus, !blocked);
11669  }
11670  
11671  static const struct rfkill_ops asus_gps_rfkill_ops = {
11672 @@ -1094,7 +1094,7 @@ static int asus_rfkill_init(struct asus_laptop *asus)
11673  
11674  	asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
11675  					RFKILL_TYPE_GPS,
11676 -					&asus_gps_rfkill_ops, NULL);
11677 +					&asus_gps_rfkill_ops, asus);
11678  	if (!asus->gps_rfkill)
11679  		return -EINVAL;
11680  
11681 diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
11682 index c44a5e8..f0b3ad1 100644
11683 --- a/drivers/platform/x86/intel_ips.c
11684 +++ b/drivers/platform/x86/intel_ips.c
11685 @@ -75,6 +75,7 @@
11686  #include <drm/i915_drm.h>
11687  #include <asm/msr.h>
11688  #include <asm/processor.h>
11689 +#include "intel_ips.h"
11690  
11691  #define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
11692  
11693 @@ -245,6 +246,7 @@
11694  #define thm_writel(off, val) writel((val), ips->regmap + (off))
11695  
11696  static const int IPS_ADJUST_PERIOD = 5000; /* ms */
11697 +static bool late_i915_load = false;
11698  
11699  /* For initial average collection */
11700  static const int IPS_SAMPLE_PERIOD = 200; /* ms */
11701 @@ -339,6 +341,9 @@ struct ips_driver {
11702  	u64 orig_turbo_ratios;
11703  };
11704  
11705 +static bool
11706 +ips_gpu_turbo_enabled(struct ips_driver *ips);
11707 +
11708  /**
11709   * ips_cpu_busy - is CPU busy?
11710   * @ips: IPS driver struct
11711 @@ -517,7 +522,7 @@ static void ips_disable_cpu_turbo(struct ips_driver *ips)
11712   */
11713  static bool ips_gpu_busy(struct ips_driver *ips)
11714  {
11715 -	if (!ips->gpu_turbo_enabled)
11716 +	if (!ips_gpu_turbo_enabled(ips))
11717  		return false;
11718  
11719  	return ips->gpu_busy();
11720 @@ -532,7 +537,7 @@ static bool ips_gpu_busy(struct ips_driver *ips)
11721   */
11722  static void ips_gpu_raise(struct ips_driver *ips)
11723  {
11724 -	if (!ips->gpu_turbo_enabled)
11725 +	if (!ips_gpu_turbo_enabled(ips))
11726  		return;
11727  
11728  	if (!ips->gpu_raise())
11729 @@ -549,7 +554,7 @@ static void ips_gpu_raise(struct ips_driver *ips)
11730   */
11731  static void ips_gpu_lower(struct ips_driver *ips)
11732  {
11733 -	if (!ips->gpu_turbo_enabled)
11734 +	if (!ips_gpu_turbo_enabled(ips))
11735  		return;
11736  
11737  	if (!ips->gpu_lower())
11738 @@ -1454,6 +1459,31 @@ out_err:
11739  	return false;
11740  }
11741  
11742 +static bool
11743 +ips_gpu_turbo_enabled(struct ips_driver *ips)
11744 +{
11745 +	if (!ips->gpu_busy && late_i915_load) {
11746 +		if (ips_get_i915_syms(ips)) {
11747 +			dev_info(&ips->dev->dev,
11748 +				 "i915 driver attached, reenabling gpu turbo\n");
11749 +			ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
11750 +		}
11751 +	}
11752 +
11753 +	return ips->gpu_turbo_enabled;
11754 +}
11755 +
11756 +void
11757 +ips_link_to_i915_driver()
11758 +{
11759 +	/* We can't cleanly get at the various ips_driver structs from
11760 +	 * this caller (the i915 driver), so just set a flag saying
11761 +	 * that it's time to try getting the symbols again.
11762 +	 */
11763 +	late_i915_load = true;
11764 +}
11765 +EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
11766 +
11767  static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
11768  	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
11769  		     PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
11770 diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
11771 new file mode 100644
11772 index 0000000..a777159
11773 --- /dev/null
11774 +++ b/drivers/platform/x86/intel_ips.h
11775 @@ -0,0 +1,22 @@
11776 +/*
11777 + * Copyright (c) 2010 Intel Corporation
11778 + *
11779 + * This program is free software; you can redistribute it and/or modify it
11780 + * under the terms and conditions of the GNU General Public License,
11781 + * version 2, as published by the Free Software Foundation.
11782 + *
11783 + * This program is distributed in the hope it will be useful, but WITHOUT
11784 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11785 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11786 + * more details.
11787 + *
11788 + * You should have received a copy of the GNU General Public License along with
11789 + * this program; if not, write to the Free Software Foundation, Inc.,
11790 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11791 + *
11792 + * The full GNU General Public License is included in this distribution in
11793 + * the file called "COPYING".
11794 + */
11795 +
11796 +void ips_link_to_i915_driver(void);
11797 +
11798 diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
11799 index b2978a0..6777834 100644
11800 --- a/drivers/platform/x86/wmi.c
11801 +++ b/drivers/platform/x86/wmi.c
11802 @@ -801,7 +801,7 @@ static bool guid_already_parsed(const char *guid_string)
11803  		wblock = list_entry(p, struct wmi_block, list);
11804  		gblock = &wblock->gblock;
11805  
11806 -		if (strncmp(gblock->guid, guid_string, 16) == 0)
11807 +		if (memcmp(gblock->guid, guid_string, 16) == 0)
11808  			return true;
11809  	}
11810  	return false;
11811 diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
11812 index dc4e32e..0d943ee 100644
11813 --- a/drivers/pnp/pnpacpi/core.c
11814 +++ b/drivers/pnp/pnpacpi/core.c
11815 @@ -28,7 +28,7 @@
11816  #include "../base.h"
11817  #include "pnpacpi.h"
11818  
11819 -static int num = 0;
11820 +static int num;
11821  
11822  /* We need only to blacklist devices that have already an acpi driver that
11823   * can't use pnp layer. We don't need to blacklist device that are directly
11824 @@ -180,11 +180,24 @@ struct pnp_protocol pnpacpi_protocol = {
11825  };
11826  EXPORT_SYMBOL(pnpacpi_protocol);
11827  
11828 +static char *pnpacpi_get_id(struct acpi_device *device)
11829 +{
11830 +	struct acpi_hardware_id *id;
11831 +
11832 +	list_for_each_entry(id, &device->pnp.ids, list) {
11833 +		if (ispnpidacpi(id->id))
11834 +			return id->id;
11835 +	}
11836 +
11837 +	return NULL;
11838 +}
11839 +
11840  static int __init pnpacpi_add_device(struct acpi_device *device)
11841  {
11842  	acpi_handle temp = NULL;
11843  	acpi_status status;
11844  	struct pnp_dev *dev;
11845 +	char *pnpid;
11846  	struct acpi_hardware_id *id;
11847  
11848  	/*
11849 @@ -192,11 +205,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
11850  	 * driver should not be loaded.
11851  	 */
11852  	status = acpi_get_handle(device->handle, "_CRS", &temp);
11853 -	if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
11854 -	    is_exclusive_device(device) || (!device->status.present))
11855 +	if (ACPI_FAILURE(status))
11856 +		return 0;
11857 +
11858 +	pnpid = pnpacpi_get_id(device);
11859 +	if (!pnpid)
11860 +		return 0;
11861 +
11862 +	if (is_exclusive_device(device) || !device->status.present)
11863  		return 0;
11864  
11865 -	dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
11866 +	dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
11867  	if (!dev)
11868  		return -ENOMEM;
11869  
11870 @@ -227,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
11871  		pnpacpi_parse_resource_option_data(dev);
11872  
11873  	list_for_each_entry(id, &device->pnp.ids, list) {
11874 -		if (!strcmp(id->id, acpi_device_hid(device)))
11875 +		if (!strcmp(id->id, pnpid))
11876  			continue;
11877  		if (!ispnpidacpi(id->id))
11878  			continue;
11879 diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
11880 index 4d3b272..c8d26df 100644
11881 --- a/drivers/power/ds2760_battery.c
11882 +++ b/drivers/power/ds2760_battery.c
11883 @@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
11884  	if (di->rem_capacity > 100)
11885  		di->rem_capacity = 100;
11886  
11887 -	if (di->current_uA >= 100L)
11888 +	if (di->current_uA < -100L)
11889  		di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
11890  					/ (di->current_uA / 100L);
11891  	else
11892 diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
11893 index 20c4b95..a74cb0a 100644
11894 --- a/drivers/power/jz4740-battery.c
11895 +++ b/drivers/power/jz4740-battery.c
11896 @@ -47,6 +47,8 @@ struct jz_battery {
11897  
11898  	struct power_supply battery;
11899  	struct delayed_work work;
11900 +
11901 +	struct mutex lock;
11902  };
11903  
11904  static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
11905 @@ -68,6 +70,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
11906  	unsigned long val;
11907  	long voltage;
11908  
11909 +	mutex_lock(&battery->lock);
11910 +
11911  	INIT_COMPLETION(battery->read_completion);
11912  
11913  	enable_irq(battery->irq);
11914 @@ -91,6 +95,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
11915  	battery->cell->disable(battery->pdev);
11916  	disable_irq(battery->irq);
11917  
11918 +	mutex_unlock(&battery->lock);
11919 +
11920  	return voltage;
11921  }
11922  
11923 @@ -291,6 +297,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
11924  	jz_battery->pdev = pdev;
11925  
11926  	init_completion(&jz_battery->read_completion);
11927 +	mutex_init(&jz_battery->lock);
11928  
11929  	INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
11930  
11931 diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
11932 index aafc1c5..5bc1dcf 100644
11933 --- a/drivers/power/olpc_battery.c
11934 +++ b/drivers/power/olpc_battery.c
11935 @@ -271,14 +271,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
11936  		if (ret)
11937  			return ret;
11938  
11939 -		val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32;
11940 +		val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
11941  		break;
11942  	case POWER_SUPPLY_PROP_CURRENT_AVG:
11943  		ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
11944  		if (ret)
11945  			return ret;
11946  
11947 -		val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120;
11948 +		val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
11949  		break;
11950  	case POWER_SUPPLY_PROP_CAPACITY:
11951  		ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
11952 @@ -299,7 +299,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
11953  		if (ret)
11954  			return ret;
11955  
11956 -		val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
11957 +		val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
11958  		break;
11959  	case POWER_SUPPLY_PROP_TEMP_AMBIENT:
11960  		ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
11961 @@ -313,7 +313,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
11962  		if (ret)
11963  			return ret;
11964  
11965 -		val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15;
11966 +		val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
11967  		break;
11968  	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
11969  		ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
11970 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
11971 index 5856167..dd8242d 100644
11972 --- a/drivers/rtc/rtc-cmos.c
11973 +++ b/drivers/rtc/rtc-cmos.c
11974 @@ -36,6 +36,7 @@
11975  #include <linux/platform_device.h>
11976  #include <linux/mod_devicetable.h>
11977  #include <linux/log2.h>
11978 +#include <linux/pm.h>
11979  
11980  /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
11981  #include <asm-generic/rtc.h>
11982 @@ -850,7 +851,7 @@ static void __exit cmos_do_remove(struct device *dev)
11983  
11984  #ifdef	CONFIG_PM
11985  
11986 -static int cmos_suspend(struct device *dev, pm_message_t mesg)
11987 +static int cmos_suspend(struct device *dev)
11988  {
11989  	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
11990  	unsigned char	tmp;
11991 @@ -898,7 +899,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
11992   */
11993  static inline int cmos_poweroff(struct device *dev)
11994  {
11995 -	return cmos_suspend(dev, PMSG_HIBERNATE);
11996 +	return cmos_suspend(dev);
11997  }
11998  
11999  static int cmos_resume(struct device *dev)
12000 @@ -945,9 +946,9 @@ static int cmos_resume(struct device *dev)
12001  	return 0;
12002  }
12003  
12004 +static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
12005 +
12006  #else
12007 -#define	cmos_suspend	NULL
12008 -#define	cmos_resume	NULL
12009  
12010  static inline int cmos_poweroff(struct device *dev)
12011  {
12012 @@ -1077,7 +1078,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
12013  
12014  static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
12015  {
12016 -	return cmos_suspend(&pnp->dev, mesg);
12017 +	return cmos_suspend(&pnp->dev);
12018  }
12019  
12020  static int cmos_pnp_resume(struct pnp_dev *pnp)
12021 @@ -1157,8 +1158,9 @@ static struct platform_driver cmos_platform_driver = {
12022  	.shutdown	= cmos_platform_shutdown,
12023  	.driver = {
12024  		.name		= (char *) driver_name,
12025 -		.suspend	= cmos_suspend,
12026 -		.resume		= cmos_resume,
12027 +#ifdef CONFIG_PM
12028 +		.pm		= &cmos_pm_ops,
12029 +#endif
12030  	}
12031  };
12032  
12033 diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
12034 index 90cf0a6..dd14e20 100644
12035 --- a/drivers/rtc/rtc-rs5c372.c
12036 +++ b/drivers/rtc/rtc-rs5c372.c
12037 @@ -207,7 +207,7 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
12038  static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
12039  {
12040  	struct rs5c372	*rs5c = i2c_get_clientdata(client);
12041 -	unsigned char	buf[8];
12042 +	unsigned char	buf[7];
12043  	int		addr;
12044  
12045  	dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
12046 diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
12047 index f0037ee..142af5b 100644
12048 --- a/drivers/s390/cio/qdio.h
12049 +++ b/drivers/s390/cio/qdio.h
12050 @@ -91,6 +91,12 @@ enum qdio_irq_states {
12051  #define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
12052  #define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */
12053  
12054 +/* SIGA flags */
12055 +#define QDIO_SIGA_WRITE		0x00
12056 +#define QDIO_SIGA_READ		0x01
12057 +#define QDIO_SIGA_SYNC		0x02
12058 +#define QDIO_SIGA_QEBSM_FLAG	0x80
12059 +
12060  #ifdef CONFIG_64BIT
12061  static inline int do_sqbs(u64 token, unsigned char state, int queue,
12062  			  int *start, int *count)
12063 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
12064 index 00520f9..1763afc 100644
12065 --- a/drivers/s390/cio/qdio_main.c
12066 +++ b/drivers/s390/cio/qdio_main.c
12067 @@ -29,11 +29,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
12068  MODULE_DESCRIPTION("QDIO base support");
12069  MODULE_LICENSE("GPL");
12070  
12071 -static inline int do_siga_sync(struct subchannel_id schid,
12072 -			       unsigned int out_mask, unsigned int in_mask)
12073 +static inline int do_siga_sync(unsigned long schid,
12074 +			       unsigned int out_mask, unsigned int in_mask,
12075 +			       unsigned int fc)
12076  {
12077 -	register unsigned long __fc asm ("0") = 2;
12078 -	register struct subchannel_id __schid asm ("1") = schid;
12079 +	register unsigned long __fc asm ("0") = fc;
12080 +	register unsigned long __schid asm ("1") = schid;
12081  	register unsigned long out asm ("2") = out_mask;
12082  	register unsigned long in asm ("3") = in_mask;
12083  	int cc;
12084 @@ -47,10 +48,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
12085  	return cc;
12086  }
12087  
12088 -static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
12089 +static inline int do_siga_input(unsigned long schid, unsigned int mask,
12090 +				unsigned int fc)
12091  {
12092 -	register unsigned long __fc asm ("0") = 1;
12093 -	register struct subchannel_id __schid asm ("1") = schid;
12094 +	register unsigned long __fc asm ("0") = fc;
12095 +	register unsigned long __schid asm ("1") = schid;
12096  	register unsigned long __mask asm ("2") = mask;
12097  	int cc;
12098  
12099 @@ -279,6 +281,8 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
12100  static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
12101  			  unsigned int input)
12102  {
12103 +	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
12104 +	unsigned int fc = QDIO_SIGA_SYNC;
12105  	int cc;
12106  
12107  	if (!need_siga_sync(q))
12108 @@ -287,7 +291,12 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
12109  	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
12110  	qperf_inc(q, siga_sync);
12111  
12112 -	cc = do_siga_sync(q->irq_ptr->schid, output, input);
12113 +	if (is_qebsm(q)) {
12114 +		schid = q->irq_ptr->sch_token;
12115 +		fc |= QDIO_SIGA_QEBSM_FLAG;
12116 +	}
12117 +
12118 +	cc = do_siga_sync(schid, output, input, fc);
12119  	if (cc)
12120  		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
12121  	return cc;
12122 @@ -313,8 +322,8 @@ static inline int qdio_siga_sync_all(struct qdio_q *q)
12123  
12124  static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
12125  {
12126 -	unsigned long schid;
12127 -	unsigned int fc = 0;
12128 +	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
12129 +	unsigned int fc = QDIO_SIGA_WRITE;
12130  	u64 start_time = 0;
12131  	int cc;
12132  
12133 @@ -323,11 +332,8 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
12134  
12135  	if (is_qebsm(q)) {
12136  		schid = q->irq_ptr->sch_token;
12137 -		fc |= 0x80;
12138 +		fc |= QDIO_SIGA_QEBSM_FLAG;
12139  	}
12140 -	else
12141 -		schid = *((u32 *)&q->irq_ptr->schid);
12142 -
12143  again:
12144  	cc = do_siga_output(schid, q->mask, busy_bit, fc);
12145  
12146 @@ -347,12 +353,19 @@ again:
12147  
12148  static inline int qdio_siga_input(struct qdio_q *q)
12149  {
12150 +	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
12151 +	unsigned int fc = QDIO_SIGA_READ;
12152  	int cc;
12153  
12154  	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
12155  	qperf_inc(q, siga_read);
12156  
12157 -	cc = do_siga_input(q->irq_ptr->schid, q->mask);
12158 +	if (is_qebsm(q)) {
12159 +		schid = q->irq_ptr->sch_token;
12160 +		fc |= QDIO_SIGA_QEBSM_FLAG;
12161 +	}
12162 +
12163 +	cc = do_siga_input(schid, q->mask, fc);
12164  	if (cc)
12165  		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
12166  	return cc;
12167 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
12168 index b860d65..4cf7ffa 100644
12169 --- a/drivers/scsi/gdth.c
12170 +++ b/drivers/scsi/gdth.c
12171 @@ -4175,6 +4175,14 @@ static int ioc_general(void __user *arg, char *cmnd)
12172      ha = gdth_find_ha(gen.ionode);
12173      if (!ha)
12174          return -EFAULT;
12175 +
12176 +    if (gen.data_len > INT_MAX)
12177 +        return -EINVAL;
12178 +    if (gen.sense_len > INT_MAX)
12179 +        return -EINVAL;
12180 +    if (gen.data_len + gen.sense_len > INT_MAX)
12181 +        return -EINVAL;
12182 +
12183      if (gen.data_len + gen.sense_len != 0) {
12184          if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
12185                                       FALSE, &paddr)))
12186 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
12187 index 042153c..ddbade7 100644
12188 --- a/drivers/scsi/libsas/sas_ata.c
12189 +++ b/drivers/scsi/libsas/sas_ata.c
12190 @@ -347,6 +347,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
12191  static struct ata_port_operations sas_sata_ops = {
12192  	.phy_reset		= sas_ata_phy_reset,
12193  	.post_internal_cmd	= sas_ata_post_internal,
12194 +	.qc_defer               = ata_std_qc_defer,
12195  	.qc_prep		= ata_noop_qc_prep,
12196  	.qc_issue		= sas_ata_qc_issue,
12197  	.qc_fill_rtf		= sas_ata_qc_fill_rtf,
12198 diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
12199 index f0cfba9..baf2b60 100644
12200 --- a/drivers/scsi/libsas/sas_scsi_host.c
12201 +++ b/drivers/scsi/libsas/sas_scsi_host.c
12202 @@ -649,6 +649,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
12203  
12204  	spin_lock_irqsave(shost->host_lock, flags);
12205  	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
12206 +	shost->host_eh_scheduled = 0;
12207  	spin_unlock_irqrestore(shost->host_lock, flags);
12208  
12209  	SAS_DPRINTK("Enter %s\n", __func__);
12210 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
12211 index 57bcd5c..9e59026 100644
12212 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
12213 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
12214 @@ -2057,9 +2057,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
12215  		/* adjust hba_queue_depth, reply_free_queue_depth,
12216  		 * and queue_size
12217  		 */
12218 -		ioc->hba_queue_depth -= queue_diff;
12219 -		ioc->reply_free_queue_depth -= queue_diff;
12220 -		queue_size -= queue_diff;
12221 +		ioc->hba_queue_depth -= (queue_diff / 2);
12222 +		ioc->reply_free_queue_depth -= (queue_diff / 2);
12223 +		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
12224  	}
12225  	ioc->reply_post_queue_depth = queue_size;
12226  
12227 @@ -3662,6 +3662,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
12228  	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
12229  	mutex_init(&ioc->scsih_cmds.mutex);
12230  
12231 +	/* scsih internal command bits */
12232 +	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
12233 +	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
12234 +	mutex_init(&ioc->scsih_cmds.mutex);
12235 +
12236  	/* task management internal command bits */
12237  	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
12238  	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
12239 @@ -3786,6 +3791,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
12240  static void
12241  _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
12242  {
12243 +	mpt2sas_scsih_reset_handler(ioc, reset_phase);
12244 +	mpt2sas_ctl_reset_handler(ioc, reset_phase);
12245  	switch (reset_phase) {
12246  	case MPT2_IOC_PRE_RESET:
12247  		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
12248 @@ -3816,8 +3823,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
12249  		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
12250  		break;
12251  	}
12252 -	mpt2sas_scsih_reset_handler(ioc, reset_phase);
12253 -	mpt2sas_ctl_reset_handler(ioc, reset_phase);
12254  }
12255  
12256  /**
12257 @@ -3871,6 +3876,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
12258  {
12259  	int r;
12260  	unsigned long flags;
12261 +	u8 pe_complete = ioc->wait_for_port_enable_to_complete;
12262  
12263  	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
12264  	    __func__));
12265 @@ -3913,6 +3919,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
12266  	if (r)
12267  		goto out;
12268  	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
12269 +
12270 +	/* If this hard reset is called while port enable is active, then
12271 +	 * there is no reason to call make_ioc_operational
12272 +	 */
12273 +	if (pe_complete) {
12274 +		r = -EFAULT;
12275 +		goto out;
12276 +	}
12277  	r = _base_make_ioc_operational(ioc, sleep_flag);
12278  	if (!r)
12279  		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
12280 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
12281 index 16e99b6..794d927 100644
12282 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
12283 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
12284 @@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
12285  }
12286  
12287  /**
12288 - * mptscsih_get_scsi_lookup - returns scmd entry
12289 + * _scsih_scsi_lookup_get - returns scmd entry
12290   * @ioc: per adapter object
12291   * @smid: system request message index
12292   *
12293 @@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
12294  }
12295  
12296  /**
12297 + * _scsih_scsi_lookup_get_clear - returns scmd entry
12298 + * @ioc: per adapter object
12299 + * @smid: system request message index
12300 + *
12301 + * Returns the smid stored scmd pointer.
12302 + * Then will clear the stored scmd pointer (set it to NULL).
12303 + */
12304 +static inline struct scsi_cmnd *
12305 +_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
12306 +{
12307 +	unsigned long flags;
12308 +	struct scsi_cmnd *scmd;
12309 +
12310 +	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
12311 +	scmd = ioc->scsi_lookup[smid - 1].scmd;
12312 +	ioc->scsi_lookup[smid - 1].scmd = NULL;
12313 +	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
12314 +
12315 +	return scmd;
12316 +}
12317 +
12318 +/**
12319   * _scsih_scsi_lookup_find_by_scmd - scmd lookup
12320   * @ioc: per adapter object
12321   * @smid: system request message index
12322 @@ -2957,9 +2979,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
12323  	u16 handle;
12324  
12325  	for (i = 0 ; i < event_data->NumEntries; i++) {
12326 -		if (event_data->PHY[i].PhyStatus &
12327 -		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
12328 -			continue;
12329  		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
12330  		if (!handle)
12331  			continue;
12332 @@ -3186,7 +3205,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
12333  	u16 count = 0;
12334  
12335  	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
12336 -		scmd = _scsih_scsi_lookup_get(ioc, smid);
12337 +		scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
12338  		if (!scmd)
12339  			continue;
12340  		count++;
12341 @@ -3778,7 +3797,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
12342  	u32 response_code = 0;
12343  
12344  	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
12345 -	scmd = _scsih_scsi_lookup_get(ioc, smid);
12346 +	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
12347  	if (scmd == NULL)
12348  		return 1;
12349  
12350 @@ -4940,6 +4959,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
12351  		     event_data);
12352  #endif
12353  
12354 +	/* In MPI Revision K (0xC), the internal device reset complete was
12355 +	 * implemented, so avoid setting tm_busy flag for older firmware.
12356 +	 */
12357 +	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
12358 +		return;
12359 +
12360  	if (event_data->ReasonCode !=
12361  	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
12362  	   event_data->ReasonCode !=
12363 @@ -5034,6 +5059,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
12364      struct fw_event_work *fw_event)
12365  {
12366  	struct scsi_cmnd *scmd;
12367 +	struct scsi_device *sdev;
12368  	u16 smid, handle;
12369  	u32 lun;
12370  	struct MPT2SAS_DEVICE *sas_device_priv_data;
12371 @@ -5044,12 +5070,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
12372  	Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
12373  #endif
12374  	u16 ioc_status;
12375 +	unsigned long flags;
12376 +	int r;
12377 +
12378  	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
12379  	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
12380  	    event_data->PortWidth));
12381  	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
12382  	    __func__));
12383  
12384 +	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
12385 +	ioc->broadcast_aen_busy = 0;
12386  	termination_count = 0;
12387  	query_count = 0;
12388  	mpi_reply = ioc->tm_cmds.reply;
12389 @@ -5057,7 +5088,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
12390  		scmd = _scsih_scsi_lookup_get(ioc, smid);
12391  		if (!scmd)
12392  			continue;
12393 -		sas_device_priv_data = scmd->device->hostdata;
12394 +		sdev = scmd->device;
12395 +		sas_device_priv_data = sdev->hostdata;
12396  		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
12397  			continue;
12398  		 /* skip hidden raid components */
12399 @@ -5073,6 +5105,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
12400  		lun = sas_device_priv_data->lun;
12401  		query_count++;
12402  
12403 +		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
12404  		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
12405  		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
12406  		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
12407 @@ -5082,14 +5115,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
12408  		    (mpi_reply->ResponseCode ==
12409  		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
12410  		     mpi_reply->ResponseCode ==
12411 -		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
12412 +		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
12413 +			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
12414  			continue;
12415 -
12416 -		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
12417 -		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
12418 +		}
12419 +		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
12420 +		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
12421 +		    scmd);
12422 +		if (r == FAILED)
12423 +			sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
12424 +			    "scmd(%p)\n", scmd);
12425  		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
12426 +		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
12427  	}
12428 -	ioc->broadcast_aen_busy = 0;
12429 +	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
12430  
12431  	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
12432  	    "%s - exit, query_count = %d termination_count = %d\n",
12433 @@ -6685,6 +6724,7 @@ _scsih_remove(struct pci_dev *pdev)
12434  		destroy_workqueue(wq);
12435  
12436  	/* release all the volumes */
12437 +	_scsih_ir_shutdown(ioc);
12438  	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
12439  	    list) {
12440  		if (raid_device->starget) {
12441 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
12442 index 6cfa014..dd78f9e 100644
12443 --- a/drivers/scsi/pmcraid.h
12444 +++ b/drivers/scsi/pmcraid.h
12445 @@ -568,7 +568,6 @@ struct pmcraid_cmd {
12446  	struct pmcraid_control_block *ioa_cb;
12447  	dma_addr_t ioa_cb_bus_addr;
12448  	dma_addr_t dma_handle;
12449 -	u8 *sense_buffer;
12450  
12451  	/* pointer to mid layer structure of SCSI commands */
12452  	struct scsi_cmnd *scsi_cmd;
12453 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
12454 index 1a1b281..16df82a 100644
12455 --- a/drivers/scsi/qla2xxx/qla_gbl.h
12456 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
12457 @@ -92,6 +92,7 @@ extern int ql2xshiftctondsd;
12458  extern int ql2xdbwr;
12459  extern int ql2xdontresethba;
12460  extern int ql2xasynctmfenable;
12461 +extern int ql2xgffidenable;
12462  extern int ql2xenabledif;
12463  extern int ql2xenablehba_err_chk;
12464  extern int ql2xtargetreset;
12465 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
12466 index 9c383ba..49e7b09 100644
12467 --- a/drivers/scsi/qla2xxx/qla_init.c
12468 +++ b/drivers/scsi/qla2xxx/qla_init.c
12469 @@ -3258,8 +3258,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
12470  			continue;
12471  
12472  		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
12473 -		if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
12474 -		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
12475 +		if (ql2xgffidenable &&
12476 +		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
12477 +		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
12478  			continue;
12479  
12480  		/* Locate matching device in database. */
12481 diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
12482 index 579f028..de84499 100644
12483 --- a/drivers/scsi/qla2xxx/qla_iocb.c
12484 +++ b/drivers/scsi/qla2xxx/qla_iocb.c
12485 @@ -1061,6 +1061,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
12486  		fcp_cmnd->additional_cdb_len |= 2;
12487  
12488  	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
12489 +	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
12490  	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
12491  	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
12492  	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
12493 diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
12494 index 0a71cc7..424cf18 100644
12495 --- a/drivers/scsi/qla2xxx/qla_nx.c
12496 +++ b/drivers/scsi/qla2xxx/qla_nx.c
12497 @@ -2740,6 +2740,7 @@ sufficient_dsds:
12498  			goto queuing_error_fcp_cmnd;
12499  
12500  		int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
12501 +		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
12502  
12503  		/* build FCP_CMND IU */
12504  		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
12505 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
12506 index 1e4bff6..202a31a 100644
12507 --- a/drivers/scsi/qla2xxx/qla_os.c
12508 +++ b/drivers/scsi/qla2xxx/qla_os.c
12509 @@ -160,6 +160,11 @@ MODULE_PARM_DESC(ql2xtargetreset,
12510  		 "Enable target reset."
12511  		 "Default is 1 - use hw defaults.");
12512  
12513 +int ql2xgffidenable;
12514 +module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
12515 +MODULE_PARM_DESC(ql2xgffidenable,
12516 +		"Enables GFF_ID checks of port type. "
12517 +		"Default is 0 - Do not use GFF_ID information.");
12518  
12519  int ql2xasynctmfenable;
12520  module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
12521 @@ -2090,6 +2095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
12522  		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
12523  		ha->gid_list_info_size = 8;
12524  		ha->optrom_size = OPTROM_SIZE_82XX;
12525 +		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
12526  		ha->isp_ops = &qla82xx_isp_ops;
12527  		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
12528  		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
12529 diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
12530 index 5d4a382..449256f 100644
12531 --- a/drivers/scsi/qla4xxx/ql4_nx.c
12532 +++ b/drivers/scsi/qla4xxx/ql4_nx.c
12533 @@ -5,6 +5,7 @@
12534   * See LICENSE.qla4xxx for copyright and licensing details.
12535   */
12536  #include <linux/delay.h>
12537 +#include <linux/io.h>
12538  #include <linux/pci.h>
12539  #include "ql4_def.h"
12540  #include "ql4_glbl.h"
12541 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
12542 index ee02d38..5bff8a2 100644
12543 --- a/drivers/scsi/scsi_lib.c
12544 +++ b/drivers/scsi/scsi_lib.c
12545 @@ -1632,9 +1632,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
12546  
12547  	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
12548  
12549 -	/* New queue, no concurrency on queue_flags */
12550  	if (!shost->use_clustering)
12551 -		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
12552 +		q->limits.cluster = 0;
12553  
12554  	/*
12555  	 * set a reasonable default alignment on word boundaries: the
12556 @@ -2428,7 +2427,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
12557  		sdev->sdev_state = SDEV_RUNNING;
12558  	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
12559  		sdev->sdev_state = SDEV_CREATED;
12560 -	else
12561 +	else if (sdev->sdev_state != SDEV_CANCEL &&
12562 +		 sdev->sdev_state != SDEV_OFFLINE)
12563  		return -EINVAL;
12564  
12565  	spin_lock_irqsave(q->queue_lock, flags);
12566 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
12567 index c3f6737..7a0ca6f 100644
12568 --- a/drivers/scsi/scsi_sysfs.c
12569 +++ b/drivers/scsi/scsi_sysfs.c
12570 @@ -962,10 +962,11 @@ static void __scsi_remove_target(struct scsi_target *starget)
12571  	list_for_each_entry(sdev, &shost->__devices, siblings) {
12572  		if (sdev->channel != starget->channel ||
12573  		    sdev->id != starget->id ||
12574 -		    sdev->sdev_state == SDEV_DEL)
12575 +		    scsi_device_get(sdev))
12576  			continue;
12577  		spin_unlock_irqrestore(shost->host_lock, flags);
12578  		scsi_remove_device(sdev);
12579 +		scsi_device_put(sdev);
12580  		spin_lock_irqsave(shost->host_lock, flags);
12581  		goto restart;
12582  	}
12583 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
12584 index ffa0689..c52273c 100644
12585 --- a/drivers/scsi/sd.c
12586 +++ b/drivers/scsi/sd.c
12587 @@ -1153,6 +1153,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
12588  	u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
12589  	u64 bad_lba;
12590  	int info_valid;
12591 +	/*
12592 +	 * resid is optional but mostly filled in.  When it's unused,
12593 +	 * its value is zero, so we assume the whole buffer transferred
12594 +	 */
12595 +	unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
12596 +	unsigned int good_bytes;
12597  
12598  	if (scmd->request->cmd_type != REQ_TYPE_FS)
12599  		return 0;
12600 @@ -1186,7 +1192,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
12601  	/* This computation should always be done in terms of
12602  	 * the resolution of the device's medium.
12603  	 */
12604 -	return (bad_lba - start_lba) * scmd->device->sector_size;
12605 +	good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
12606 +	return min(good_bytes, transferred);
12607  }
12608  
12609  /**
12610 @@ -2252,11 +2259,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
12611  	index = sdkp->index;
12612  	dev = &sdp->sdev_gendev;
12613  
12614 -	if (index < SD_MAX_DISKS) {
12615 -		gd->major = sd_major((index & 0xf0) >> 4);
12616 -		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
12617 -		gd->minors = SD_MINORS;
12618 -	}
12619 +	gd->major = sd_major((index & 0xf0) >> 4);
12620 +	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
12621 +	gd->minors = SD_MINORS;
12622 +
12623  	gd->fops = &sd_fops;
12624  	gd->private_data = &sdkp->driver;
12625  	gd->queue = sdkp->device->request_queue;
12626 @@ -2346,6 +2352,12 @@ static int sd_probe(struct device *dev)
12627  	if (error)
12628  		goto out_put;
12629  
12630 +	if (index >= SD_MAX_DISKS) {
12631 +		error = -ENODEV;
12632 +		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
12633 +		goto out_free_index;
12634 +	}
12635 +
12636  	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
12637  	if (error)
12638  		goto out_free_index;
12639 diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
12640 index 24110f6..c9e86de 100644
12641 --- a/drivers/serial/8250.c
12642 +++ b/drivers/serial/8250.c
12643 @@ -241,7 +241,8 @@ static const struct serial8250_config uart_config[] = {
12644  		.fifo_size	= 128,
12645  		.tx_loadsz	= 128,
12646  		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
12647 -		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
12648 +		/* UART_CAP_EFR breaks billionon CF bluetooth card. */
12649 +		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
12650  	},
12651  	[PORT_16654] = {
12652  		.name		= "ST16654",
12653 diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
12654 index 5dff45c..f083f7c 100644
12655 --- a/drivers/serial/mfd.c
12656 +++ b/drivers/serial/mfd.c
12657 @@ -892,8 +892,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
12658  	unsigned char cval, fcr = 0;
12659  	unsigned long flags;
12660  	unsigned int baud, quot;
12661 -	u32 mul = 0x3600;
12662 -	u32 ps = 0x10;
12663 +	u32 ps, mul;
12664  
12665  	switch (termios->c_cflag & CSIZE) {
12666  	case CS5:
12667 @@ -937,20 +936,19 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
12668  		ps = 0xC;
12669  		quot = 1;
12670  		break;
12671 -	case 2500000:
12672 -		mul = 0x2710;
12673 -		ps = 0x10;
12674 -		quot = 1;
12675 -		break;
12676  	case 18432000:
12677  		mul = 0x2400;
12678  		ps = 0x10;
12679  		quot = 1;
12680  		break;
12681 +	case 3000000:
12682 +	case 2500000:
12683 +	case 2000000:
12684  	case 1500000:
12685 -		mul = 0x1D4C;
12686 -		ps = 0xc;
12687 -		quot = 1;
12688 +	case 1000000:
12689 +	case 500000:
12690 +		/* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
12691 +		mul = baud / 500000 * 0x9C4;
12692  		break;
12693  	default:
12694  		;
12695 diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
12696 index ef9c6a0..744d3f6 100644
12697 --- a/drivers/ssb/b43_pci_bridge.c
12698 +++ b/drivers/ssb/b43_pci_bridge.c
12699 @@ -24,6 +24,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
12700  	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
12701  	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
12702  	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
12703 +	{ PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC,  0x4318) },
12704  	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
12705  	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
12706  	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
12707 diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
12708 index 526682d..17053a5 100644
12709 --- a/drivers/ssb/pcmcia.c
12710 +++ b/drivers/ssb/pcmcia.c
12711 @@ -734,7 +734,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
12712  
12713  	/* Fetch the vendor specific tuples. */
12714  	res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
12715 -				ssb_pcmcia_do_get_invariants, sprom);
12716 +				ssb_pcmcia_do_get_invariants, iv);
12717  	if ((res == 0) || (res == -ENOSPC))
12718  		return 0;
12719  
12720 diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
12721 index 5b279fb..6244555 100644
12722 --- a/drivers/staging/asus_oled/asus_oled.c
12723 +++ b/drivers/staging/asus_oled/asus_oled.c
12724 @@ -620,13 +620,13 @@ static ssize_t class_set_picture(struct device *device,
12725  
12726  #define ASUS_OLED_DEVICE_ATTR(_file)		dev_attr_asus_oled_##_file
12727  
12728 -static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO,
12729 +static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
12730  		   get_enabled, set_enabled);
12731 -static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture);
12732 +static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
12733  
12734 -static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO,
12735 +static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
12736  		   class_get_enabled, class_set_enabled);
12737 -static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture);
12738 +static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
12739  
12740  static int asus_oled_probe(struct usb_interface *interface,
12741  			   const struct usb_device_id *id)
12742 diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
12743 index 2ea97de..876be5a 100644
12744 --- a/drivers/staging/batman-adv/soft-interface.c
12745 +++ b/drivers/staging/batman-adv/soft-interface.c
12746 @@ -246,6 +246,10 @@ void interface_rx(struct sk_buff *skb, int hdr_size)
12747  	skb_pull_rcsum(skb, hdr_size);
12748  /*	skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
12749  
12750 +	if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
12751 +		kfree_skb(skb);
12752 +		return;
12753 +	}
12754  	skb->dev = dev;
12755  	skb->protocol = eth_type_trans(skb, dev);
12756  
12757 diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
12758 index 8b383ee..5c6c727 100644
12759 --- a/drivers/staging/comedi/drivers/jr3_pci.c
12760 +++ b/drivers/staging/comedi/drivers/jr3_pci.c
12761 @@ -54,6 +54,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci)
12762  
12763  #define PCI_VENDOR_ID_JR3 0x1762
12764  #define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
12765 +#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
12766  #define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
12767  #define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
12768  #define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
12769 @@ -73,6 +74,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
12770  	{
12771  	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
12772  		    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
12773 +	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
12774 +		    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
12775  	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
12776  		    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
12777  	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
12778 @@ -807,6 +810,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
12779  					devpriv->n_channels = 1;
12780  				}
12781  				break;
12782 +			case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
12783 +					devpriv->n_channels = 1;
12784 +				}
12785 +				break;
12786  			case PCI_DEVICE_ID_JR3_2_CHANNEL:{
12787  					devpriv->n_channels = 2;
12788  				}
12789 diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
12790 index 3acf7e6..681312d 100644
12791 --- a/drivers/staging/comedi/drivers/ni_labpc.c
12792 +++ b/drivers/staging/comedi/drivers/ni_labpc.c
12793 @@ -572,7 +572,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
12794  	/* grab our IRQ */
12795  	if (irq) {
12796  		isr_flags = 0;
12797 -		if (thisboard->bustype == pci_bustype)
12798 +		if (thisboard->bustype == pci_bustype
12799 +		    || thisboard->bustype == pcmcia_bustype)
12800  			isr_flags |= IRQF_SHARED;
12801  		if (request_irq(irq, labpc_interrupt, isr_flags,
12802  				driver_labpc.driver_name, dev)) {
12803 diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
12804 index 1d5e879..0d318c7 100644
12805 --- a/drivers/staging/cx25821/cx25821-video.c
12806 +++ b/drivers/staging/cx25821/cx25821-video.c
12807 @@ -92,7 +92,7 @@ int cx25821_get_format_size(void)
12808  	return ARRAY_SIZE(formats);
12809  }
12810  
12811 -struct cx25821_fmt *format_by_fourcc(unsigned int fourcc)
12812 +struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
12813  {
12814  	unsigned int i;
12815  
12816 @@ -848,7 +848,7 @@ static int video_open(struct file *file)
12817         pix_format =
12818  	   (dev->channels[ch_id].pixel_formats ==
12819  	    PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV;
12820 -       fh->fmt = format_by_fourcc(pix_format);
12821 +       fh->fmt = cx25821_format_by_fourcc(pix_format);
12822  
12823         v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio);
12824  
12825 @@ -1009,7 +1009,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
12826         if (0 != err)
12827  	       return err;
12828  
12829 -       fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
12830 +       fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
12831         fh->vidq.field = f->fmt.pix.field;
12832  
12833         /* check if width and height is valid based on set standard */
12834 @@ -1117,7 +1117,7 @@ int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fo
12835  	enum v4l2_field field;
12836  	unsigned int maxw, maxh;
12837  
12838 -	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
12839 +	fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat);
12840  	if (NULL == fmt)
12841  		return -EINVAL;
12842  
12843 diff --git a/drivers/staging/cx25821/cx25821-video.h b/drivers/staging/cx25821/cx25821-video.h
12844 index cc6034b..a2415d3 100644
12845 --- a/drivers/staging/cx25821/cx25821-video.h
12846 +++ b/drivers/staging/cx25821/cx25821-video.h
12847 @@ -87,7 +87,7 @@ extern unsigned int vid_limit;
12848  
12849  #define FORMAT_FLAGS_PACKED       0x01
12850  extern struct cx25821_fmt formats[];
12851 -extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc);
12852 +extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc);
12853  extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
12854  
12855  extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
12856 diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
12857 index eed74f0..f21a0e8 100644
12858 --- a/drivers/staging/frontier/tranzport.c
12859 +++ b/drivers/staging/frontier/tranzport.c
12860 @@ -204,7 +204,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
12861      t->value = temp;							\
12862      return count;							\
12863    }									\
12864 -  static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
12865 +  static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
12866  
12867  show_int(enable);
12868  show_int(offline);
12869 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
12870 index ff1d247..26d3677 100644
12871 --- a/drivers/staging/hv/blkvsc_drv.c
12872 +++ b/drivers/staging/hv/blkvsc_drv.c
12873 @@ -369,6 +369,7 @@ static int blkvsc_probe(struct device *device)
12874  		blkdev->gd->first_minor = 0;
12875  	blkdev->gd->fops = &block_ops;
12876  	blkdev->gd->private_data = blkdev;
12877 +	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
12878  	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
12879  
12880  	blkvsc_do_inquiry(blkdev);
12881 diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
12882 index 1d2ebbe..95dee47 100644
12883 --- a/drivers/staging/hv/netvsc.c
12884 +++ b/drivers/staging/hv/netvsc.c
12885 @@ -1250,7 +1250,7 @@ static void NetVscOnChannelCallback(void *Context)
12886  	/* ASSERT(device); */
12887  
12888  	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
12889 -			 GFP_KERNEL);
12890 +			 GFP_ATOMIC);
12891  	if (!packet)
12892  		return;
12893  	buffer = packet;
12894 diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
12895 index 64a0114..d2d5608 100644
12896 --- a/drivers/staging/hv/netvsc_drv.c
12897 +++ b/drivers/staging/hv/netvsc_drv.c
12898 @@ -233,6 +233,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
12899  	if (status == 1) {
12900  		netif_carrier_on(net);
12901  		netif_wake_queue(net);
12902 +		netif_notify_peers(net);
12903  	} else {
12904  		netif_carrier_off(net);
12905  		netif_stop_queue(net);
12906 diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
12907 index bb7d765..ab2d5fa 100644
12908 --- a/drivers/staging/iio/accel/adis16220_core.c
12909 +++ b/drivers/staging/iio/accel/adis16220_core.c
12910 @@ -506,7 +506,7 @@ static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL,
12911  		adis16220_write_reset, 0);
12912  
12913  #define IIO_DEV_ATTR_CAPTURE(_store)				\
12914 -	IIO_DEVICE_ATTR(capture, S_IWUGO, NULL, _store, 0)
12915 +	IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
12916  
12917  static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
12918  
12919 diff --git a/drivers/staging/line6/control.c b/drivers/staging/line6/control.c
12920 index 0b59852..e414571 100644
12921 --- a/drivers/staging/line6/control.c
12922 +++ b/drivers/staging/line6/control.c
12923 @@ -268,210 +268,210 @@ VARIAX_PARAM_R(float, mix2);
12924  VARIAX_PARAM_R(float, mix1);
12925  VARIAX_PARAM_R(int, pickup_wiring);
12926  
12927 -static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak);
12928 -static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position,
12929 +static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak);
12930 +static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position,
12931  		   pod_set_wah_position);
12932 -static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO,
12933 +static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO,
12934  		   pod_get_compression_gain, pod_set_compression_gain);
12935 -static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO,
12936 +static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO,
12937  		   pod_get_vol_pedal_position, pod_set_vol_pedal_position);
12938 -static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO,
12939 +static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO,
12940  		   pod_get_compression_threshold,
12941  		   pod_set_compression_threshold);
12942 -static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan);
12943 -static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup,
12944 +static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan);
12945 +static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup,
12946  		   pod_set_amp_model_setup);
12947 -static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model,
12948 +static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model,
12949  		   pod_set_amp_model);
12950 -static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive);
12951 -static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass);
12952 -static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid);
12953 -static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
12954 -static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble);
12955 -static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid,
12956 +static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive);
12957 +static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass);
12958 +static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid);
12959 +static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
12960 +static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble);
12961 +static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid,
12962  		   pod_set_highmid);
12963 -static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol,
12964 +static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol,
12965  		   pod_set_chan_vol);
12966 -static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix,
12967 +static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix,
12968  		   pod_set_reverb_mix);
12969 -static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup,
12970 +static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup,
12971  		   pod_set_effect_setup);
12972 -static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO,
12973 +static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO,
12974  		   pod_get_band_1_frequency, pod_set_band_1_frequency);
12975 -static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence,
12976 +static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence,
12977  		   pod_set_presence);
12978 -static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO,
12979 +static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO,
12980  		    pod_get_treble__bass, pod_set_treble__bass);
12981 -static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO,
12982 +static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO,
12983  		   pod_get_noise_gate_enable, pod_set_noise_gate_enable);
12984 -static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold,
12985 +static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold,
12986  		   pod_set_gate_threshold);
12987 -static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time,
12988 +static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time,
12989  		   pod_set_gate_decay_time);
12990 -static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable,
12991 +static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable,
12992  		   pod_set_stomp_enable);
12993 -static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable,
12994 +static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable,
12995  		   pod_set_comp_enable);
12996 -static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time,
12997 +static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time,
12998  		   pod_set_stomp_time);
12999 -static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable,
13000 +static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable,
13001  		   pod_set_delay_enable);
13002 -static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1,
13003 +static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1,
13004  		   pod_set_mod_param_1);
13005 -static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1,
13006 +static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1,
13007  		   pod_set_delay_param_1);
13008 -static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO,
13009 +static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO,
13010  		   pod_get_delay_param_1_note_value,
13011  		   pod_set_delay_param_1_note_value);
13012 -static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO,
13013 +static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO,
13014  		    pod_get_band_2_frequency__bass,
13015  		    pod_set_band_2_frequency__bass);
13016 -static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2,
13017 +static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2,
13018  		   pod_set_delay_param_2);
13019 -static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO,
13020 +static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO,
13021  		   pod_get_delay_volume_mix, pod_set_delay_volume_mix);
13022 -static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3,
13023 +static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3,
13024  		   pod_set_delay_param_3);
13025 -static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable,
13026 +static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable,
13027  		   pod_set_reverb_enable);
13028 -static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type,
13029 +static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type,
13030  		   pod_set_reverb_type);
13031 -static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay,
13032 +static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay,
13033  		   pod_set_reverb_decay);
13034 -static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone,
13035 +static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone,
13036  		   pod_set_reverb_tone);
13037 -static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO,
13038 +static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO,
13039  		   pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
13040 -static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post,
13041 +static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post,
13042  		   pod_set_reverb_pre_post);
13043 -static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO,
13044 +static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO,
13045  		   pod_get_band_2_frequency, pod_set_band_2_frequency);
13046 -static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO,
13047 +static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO,
13048  		    pod_get_band_3_frequency__bass,
13049  		    pod_set_band_3_frequency__bass);
13050 -static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable,
13051 +static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable,
13052  		   pod_set_wah_enable);
13053 -static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO,
13054 +static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO,
13055  		   pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
13056 -static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO,
13057 +static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO,
13058  		   pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
13059 -static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO,
13060 +static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO,
13061  		   pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
13062 -static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post,
13063 +static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post,
13064  		   pod_set_eq_pre_post);
13065 -static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post,
13066 +static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post,
13067  		   pod_set_volume_pre_post);
13068 -static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model,
13069 +static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model,
13070  		   pod_set_di_model);
13071 -static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay,
13072 +static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay,
13073  		   pod_set_di_delay);
13074 -static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable,
13075 +static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable,
13076  		   pod_set_mod_enable);
13077 -static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO,
13078 +static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO,
13079  		   pod_get_mod_param_1_note_value,
13080  		   pod_set_mod_param_1_note_value);
13081 -static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2,
13082 +static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2,
13083  		   pod_set_mod_param_2);
13084 -static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3,
13085 +static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3,
13086  		   pod_set_mod_param_3);
13087 -static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4,
13088 +static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4,
13089  		   pod_set_mod_param_4);
13090 -static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5,
13091 +static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5,
13092  		   pod_set_mod_param_5);
13093 -static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix,
13094 +static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix,
13095  		   pod_set_mod_volume_mix);
13096 -static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post,
13097 +static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post,
13098  		   pod_set_mod_pre_post);
13099 -static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO,
13100 +static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO,
13101  		   pod_get_modulation_model, pod_set_modulation_model);
13102 -static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO,
13103 +static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO,
13104  		   pod_get_band_3_frequency, pod_set_band_3_frequency);
13105 -static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO,
13106 +static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO,
13107  		    pod_get_band_4_frequency__bass,
13108  		    pod_set_band_4_frequency__bass);
13109 -static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO,
13110 +static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO,
13111  		   pod_get_mod_param_1_double_precision,
13112  		   pod_set_mod_param_1_double_precision);
13113 -static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO,
13114 +static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO,
13115  		   pod_get_delay_param_1_double_precision,
13116  		   pod_set_delay_param_1_double_precision);
13117 -static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable,
13118 +static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable,
13119  		   pod_set_eq_enable);
13120 -static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap);
13121 -static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO,
13122 +static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap);
13123 +static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO,
13124  		   pod_get_volume_tweak_pedal_assign,
13125  		   pod_set_volume_tweak_pedal_assign);
13126 -static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO,
13127 +static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO,
13128  		   pod_get_band_5_frequency, pod_set_band_5_frequency);
13129 -static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner);
13130 -static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection,
13131 +static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner);
13132 +static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection,
13133  		   pod_set_mic_selection);
13134 -static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model,
13135 +static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model,
13136  		   pod_set_cabinet_model);
13137 -static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model,
13138 +static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model,
13139  		   pod_set_stomp_model);
13140 -static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel,
13141 +static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel,
13142  		   pod_set_roomlevel);
13143 -static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO,
13144 +static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO,
13145  		   pod_get_band_4_frequency, pod_set_band_4_frequency);
13146 -static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO,
13147 +static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO,
13148  		   pod_get_band_6_frequency, pod_set_band_6_frequency);
13149 -static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO,
13150 +static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO,
13151  		   pod_get_stomp_param_1_note_value,
13152  		   pod_set_stomp_param_1_note_value);
13153 -static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2,
13154 +static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2,
13155  		   pod_set_stomp_param_2);
13156 -static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3,
13157 +static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3,
13158  		   pod_set_stomp_param_3);
13159 -static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4,
13160 +static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4,
13161  		   pod_set_stomp_param_4);
13162 -static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5,
13163 +static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5,
13164  		   pod_set_stomp_param_5);
13165 -static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6,
13166 +static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6,
13167  		   pod_set_stomp_param_6);
13168 -static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO,
13169 +static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO,
13170  		   pod_get_amp_switch_select, pod_set_amp_switch_select);
13171 -static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4,
13172 +static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4,
13173  		   pod_set_delay_param_4);
13174 -static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5,
13175 +static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5,
13176  		   pod_set_delay_param_5);
13177 -static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post,
13178 +static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post,
13179  		   pod_set_delay_pre_post);
13180 -static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model,
13181 +static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model,
13182  		   pod_set_delay_model);
13183 -static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO,
13184 +static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO,
13185  		   pod_get_delay_verb_model, pod_set_delay_verb_model);
13186 -static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb,
13187 +static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb,
13188  		   pod_set_tempo_msb);
13189 -static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb,
13190 +static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb,
13191  		   pod_set_tempo_lsb);
13192 -static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model,
13193 +static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model,
13194  		   pod_set_wah_model);
13195 -static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume,
13196 +static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume,
13197  		   pod_set_bypass_volume);
13198 -static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off,
13199 +static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off,
13200  		   pod_set_fx_loop_on_off);
13201 -static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO,
13202 +static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO,
13203  		   pod_get_tweak_param_select, pod_set_tweak_param_select);
13204 -static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage,
13205 +static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage,
13206  		   pod_set_amp1_engage);
13207 -static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain,
13208 +static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain,
13209  		   pod_set_band_1_gain);
13210 -static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO,
13211 +static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO,
13212  		    pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
13213 -static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain,
13214 +static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain,
13215  		   pod_set_band_2_gain);
13216 -static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO,
13217 +static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO,
13218  		    pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
13219 -static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain,
13220 +static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain,
13221  		   pod_set_band_3_gain);
13222 -static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO,
13223 +static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO,
13224  		    pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
13225 -static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO,
13226 +static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO,
13227  		    pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
13228 -static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain,
13229 +static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain,
13230  		   pod_set_band_4_gain);
13231 -static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO,
13232 +static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO,
13233  		    pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
13234  static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
13235  static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable,
13236 diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
13237 index 32b6ca7..9b42e34 100644
13238 --- a/drivers/staging/line6/midi.c
13239 +++ b/drivers/staging/line6/midi.c
13240 @@ -362,8 +362,8 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev,
13241  	return count;
13242  }
13243  
13244 -static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
13245 -static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
13246 +static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
13247 +static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive);
13248  
13249  /* MIDI device destructor */
13250  static int snd_line6_midi_free(struct snd_device *device)
13251 diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
13252 index 28f5146..63318d7 100644
13253 --- a/drivers/staging/line6/pod.c
13254 +++ b/drivers/staging/line6/pod.c
13255 @@ -952,33 +952,33 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1, 1);
13256  #undef GET_SYSTEM_PARAM
13257  
13258  /* POD special files: */
13259 -static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, pod_set_channel);
13260 +static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel);
13261  static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
13262  static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
13263  static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
13264 -static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump);
13265 -static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
13266 -static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish);
13267 +static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
13268 +static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf);
13269 +static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
13270  static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write);
13271 -static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
13272 -static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
13273 +static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess);
13274 +static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level);
13275  static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
13276  static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
13277 -static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_amp_setup);
13278 -static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, pod_set_retrieve_channel);
13279 -static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_effects_setup);
13280 -static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing, pod_set_routing);
13281 +static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup);
13282 +static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel);
13283 +static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup);
13284 +static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing);
13285  static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write);
13286 -static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, pod_set_store_amp_setup);
13287 -static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, pod_set_store_channel);
13288 -static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, pod_set_store_effects_setup);
13289 -static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
13290 -static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
13291 +static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup);
13292 +static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel);
13293 +static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup);
13294 +static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq);
13295 +static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute);
13296  static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
13297  static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);
13298  
13299  #if CREATE_RAW_FILE
13300 -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
13301 +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
13302  #endif
13303  
13304  /*
13305 diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
13306 index e6770ea..db42178 100644
13307 --- a/drivers/staging/line6/toneport.c
13308 +++ b/drivers/staging/line6/toneport.c
13309 @@ -124,9 +124,9 @@ static ssize_t toneport_set_led_green(struct device *dev,
13310  	return count;
13311  }
13312  
13313 -static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read,
13314 +static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read,
13315  		   toneport_set_led_red);
13316 -static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read,
13317 +static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read,
13318  		   toneport_set_led_green);
13319  
13320  static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
13321 diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
13322 index 58ddbe6..b2fc09b 100644
13323 --- a/drivers/staging/line6/variax.c
13324 +++ b/drivers/staging/line6/variax.c
13325 @@ -389,17 +389,17 @@ static ssize_t variax_set_raw2(struct device *dev,
13326  #endif
13327  
13328  /* Variax workbench special files: */
13329 -static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, variax_set_model);
13330 -static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, variax_set_volume);
13331 -static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone);
13332 +static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model);
13333 +static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume);
13334 +static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone);
13335  static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write);
13336  static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write);
13337  static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write);
13338 -static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, variax_set_active);
13339 +static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, variax_set_active);
13340  
13341  #if CREATE_RAW_FILE
13342 -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
13343 -static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2);
13344 +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
13345 +static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
13346  #endif
13347  
13348  
13349 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
13350 index 42783d7..6771520 100644
13351 --- a/drivers/staging/phison/phison.c
13352 +++ b/drivers/staging/phison/phison.c
13353 @@ -62,7 +62,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
13354  	};
13355  	const struct ata_port_info *ppi[] = { &info, NULL };
13356  
13357 -	ret = ata_pci_sff_init_one(pdev, ppi, &phison_sht, NULL, 0);
13358 +	ret = ata_pci_bmdma_init_one(pdev, ppi, &phison_sht, NULL, 0);
13359  
13360  	dev_dbg(&pdev->dev, "phison_init_one(), ret = %x\n", ret);
13361  
13362 diff --git a/drivers/staging/rt2860/chips/rt3090.c b/drivers/staging/rt2860/chips/rt3090.c
13363 index c2933c6..cbc59f8 100644
13364 --- a/drivers/staging/rt2860/chips/rt3090.c
13365 +++ b/drivers/staging/rt2860/chips/rt3090.c
13366 @@ -51,7 +51,8 @@ void NICInitRT3090RFRegisters(struct rt_rtmp_adapter *pAd)
13367  	if (IS_RT3090(pAd)) {
13368  		/* Init RF calibration */
13369  		/* Driver should toggle RF R30 bit7 before init RF registers */
13370 -		u32 RfReg = 0, data;
13371 +		u8 RfReg;
13372 +		u32 data;
13373  
13374  		RT30xxReadRFRegister(pAd, RF_R30, (u8 *)&RfReg);
13375  		RfReg |= 0x80;
13376 diff --git a/drivers/staging/rt2860/chips/rt30xx.c b/drivers/staging/rt2860/chips/rt30xx.c
13377 index 4367a19..88eba51 100644
13378 --- a/drivers/staging/rt2860/chips/rt30xx.c
13379 +++ b/drivers/staging/rt2860/chips/rt30xx.c
13380 @@ -53,7 +53,7 @@ struct rt_reg_pair RT30xx_RFRegTable[] = {
13381  	,
13382  	{RF_R06, 0x02}
13383  	,
13384 -	{RF_R07, 0x70}
13385 +	{RF_R07, 0x60}
13386  	,
13387  	{RF_R09, 0x0F}
13388  	,
13389 @@ -441,7 +441,7 @@ void RT30xxReverseRFSleepModeSetup(struct rt_rtmp_adapter *pAd)
13390  
13391  		/* VCO_IC, RF R7 register Bit 4 & Bit 5 to 1 */
13392  		RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
13393 -		RFValue |= 0x30;
13394 +		RFValue |= 0x20;
13395  		RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
13396  
13397  		/* Idoh, RF R9 register Bit 1, Bit 2 & Bit 3 to 1 */
13398 diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
13399 index ad60cea..caf8b76 100644
13400 --- a/drivers/staging/rt2860/rt_main_dev.c
13401 +++ b/drivers/staging/rt2860/rt_main_dev.c
13402 @@ -483,8 +483,6 @@ struct net_device *RtmpPhyNetDevInit(struct rt_rtmp_adapter *pAd,
13403  	net_dev->ml_priv = (void *)pAd;
13404  	pAd->net_dev = net_dev;
13405  
13406 -	netif_stop_queue(net_dev);
13407 -
13408  	return net_dev;
13409  
13410  }
13411 diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
13412 index ebf9074..aca0c46 100644
13413 --- a/drivers/staging/rt2860/usb_main_dev.c
13414 +++ b/drivers/staging/rt2860/usb_main_dev.c
13415 @@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = {
13416  	{USB_DEVICE(0x14B2, 0x3C07)},	/* AL */
13417  	{USB_DEVICE(0x050D, 0x8053)},	/* Belkin */
13418  	{USB_DEVICE(0x050D, 0x825B)},	/* Belkin */
13419 +	{USB_DEVICE(0x050D, 0x935A)},	/* Belkin F6D4050 v1 */
13420  	{USB_DEVICE(0x050D, 0x935B)},	/* Belkin F6D4050 v2 */
13421  	{USB_DEVICE(0x14B2, 0x3C23)},	/* Airlink */
13422  	{USB_DEVICE(0x14B2, 0x3C27)},	/* Airlink */
13423 @@ -105,6 +106,7 @@ struct usb_device_id rtusb_usb_id[] = {
13424  	{USB_DEVICE(0x0411, 0x016f)},	/* MelCo.,Inc. WLI-UC-G301N */
13425  	{USB_DEVICE(0x1737, 0x0070)},	/* Linksys WUSB100 */
13426  	{USB_DEVICE(0x1737, 0x0071)},	/* Linksys WUSB600N */
13427 +	{USB_DEVICE(0x1737, 0x0078)},	/* Linksys WUSB100v2 */
13428  	{USB_DEVICE(0x0411, 0x00e8)},	/* Buffalo WLI-UC-G300N */
13429  	{USB_DEVICE(0x050d, 0x815c)},	/* Belkin F5D8053 */
13430  	{USB_DEVICE(0x100D, 0x9031)},	/* Motorola 2770 */
13431 @@ -181,6 +183,7 @@ struct usb_device_id rtusb_usb_id[] = {
13432  	{USB_DEVICE(0x2001, 0x3C09)},	/* D-Link */
13433  	{USB_DEVICE(0x2001, 0x3C0A)},	/* D-Link 3072 */
13434  	{USB_DEVICE(0x2019, 0xED14)},	/* Planex Communications, Inc. */
13435 +	{USB_DEVICE(0x0411, 0x015D)},	/* Buffalo Airstation WLI-UC-GN */
13436  	{}			/* Terminating entry */
13437  };
13438  
13439 diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
13440 index a0ece1f..e7e8745 100644
13441 --- a/drivers/staging/rtl8187se/r8185b_init.c
13442 +++ b/drivers/staging/rtl8187se/r8185b_init.c
13443 @@ -268,8 +268,12 @@ HwHSSIThreeWire(
13444  			}
13445  			udelay(10);
13446  		}
13447 -		if (TryCnt == TC_3W_POLL_MAX_TRY_CNT)
13448 -			panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp);
13449 +		if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) {
13450 +			printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:"
13451 +			       " %#X RE|WE bits are not clear!!\n", u1bTmp);
13452 +			dump_stack();
13453 +			return 0;
13454 +		}
13455  
13456  		// RTL8187S HSSI Read/Write Function
13457  		u1bTmp = read_nic_byte(dev, RF_SW_CONFIG);
13458 @@ -309,13 +313,23 @@ HwHSSIThreeWire(
13459  				int idx;
13460  				int ByteCnt = nDataBufBitCnt / 8;
13461                                  //printk("%d\n",nDataBufBitCnt);
13462 -				if ((nDataBufBitCnt % 8) != 0)
13463 -				panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n",
13464 -				nDataBufBitCnt);
13465 +				if ((nDataBufBitCnt % 8) != 0) {
13466 +					printk(KERN_ERR "rtl8187se: "
13467 +					       "HwThreeWire(): nDataBufBitCnt(%d)"
13468 +					       " should be multiple of 8!!!\n",
13469 +					       nDataBufBitCnt);
13470 +					dump_stack();
13471 +					nDataBufBitCnt += 8;
13472 +					nDataBufBitCnt &= ~7;
13473 +				}
13474  
13475 -			       if (nDataBufBitCnt > 64)
13476 -				panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n",
13477 -				nDataBufBitCnt);
13478 +			       if (nDataBufBitCnt > 64) {
13479 +					printk(KERN_ERR "rtl8187se: HwThreeWire():"
13480 +					       " nDataBufBitCnt(%d) should <= 64!!!\n",
13481 +					       nDataBufBitCnt);
13482 +					dump_stack();
13483 +					nDataBufBitCnt = 64;
13484 +				}
13485  
13486  				for(idx = 0; idx < ByteCnt; idx++)
13487  				{
13488 diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
13489 index eb44b60..ac2bf11 100644
13490 --- a/drivers/staging/samsung-laptop/samsung-laptop.c
13491 +++ b/drivers/staging/samsung-laptop/samsung-laptop.c
13492 @@ -356,7 +356,7 @@ static ssize_t set_silent_state(struct device *dev,
13493  	}
13494  	return count;
13495  }
13496 -static DEVICE_ATTR(silent, S_IWUGO | S_IRUGO,
13497 +static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO,
13498  		   get_silent_state, set_silent_state);
13499  
13500  
13501 diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
13502 index c7e061e..456cd5c 100644
13503 --- a/drivers/staging/udlfb/udlfb.c
13504 +++ b/drivers/staging/udlfb/udlfb.c
13505 @@ -1143,7 +1143,7 @@ static struct device_attribute fb_device_attrs[] = {
13506  	__ATTR_RO(metrics_bytes_sent),
13507  	__ATTR_RO(metrics_cpu_kcycles_used),
13508  	__ATTR_RO(metrics_misc),
13509 -	__ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store),
13510 +	__ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
13511  	__ATTR_RW(use_defio),
13512  };
13513  
13514 diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
13515 index a2566f1..af3832b 100644
13516 --- a/drivers/staging/usbip/usbip_event.c
13517 +++ b/drivers/staging/usbip/usbip_event.c
13518 @@ -38,21 +38,13 @@ static int event_handler(struct usbip_device *ud)
13519  			ud->eh_ops.shutdown(ud);
13520  
13521  			ud->event &= ~USBIP_EH_SHUTDOWN;
13522 -
13523 -			break;
13524  		}
13525  
13526 -		/* Stop the error handler. */
13527 -		if (ud->event & USBIP_EH_BYE)
13528 -			return -1;
13529 -
13530  		/* Reset the device. */
13531  		if (ud->event & USBIP_EH_RESET) {
13532  			ud->eh_ops.reset(ud);
13533  
13534  			ud->event &= ~USBIP_EH_RESET;
13535 -
13536 -			break;
13537  		}
13538  
13539  		/* Mark the device as unusable. */
13540 @@ -60,13 +52,11 @@ static int event_handler(struct usbip_device *ud)
13541  			ud->eh_ops.unusable(ud);
13542  
13543  			ud->event &= ~USBIP_EH_UNUSABLE;
13544 -
13545 -			break;
13546  		}
13547  
13548 -		/* NOTREACHED */
13549 -		printk(KERN_ERR "%s: unknown event\n", __func__);
13550 -		return -1;
13551 +		/* Stop the error handler. */
13552 +		if (ud->event & USBIP_EH_BYE)
13553 +			return -1;
13554  	}
13555  
13556  	return 0;
13557 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
13558 index 0574d84..08bd26a 100644
13559 --- a/drivers/staging/usbip/vhci_hcd.c
13560 +++ b/drivers/staging/usbip/vhci_hcd.c
13561 @@ -164,6 +164,8 @@ void rh_port_disconnect(int rhport)
13562  	 * spin_unlock(&vdev->ud.lock); */
13563  
13564  	spin_unlock_irqrestore(&the_controller->lock, flags);
13565 +
13566 +	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
13567  }
13568  
13569  
13570 @@ -797,20 +799,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
13571  		spin_unlock_irqrestore(&vdev->priv_lock, flags2);
13572  	}
13573  
13574 -
13575 -	if (!vdev->ud.tcp_socket) {
13576 -		/* tcp connection is closed */
13577 -		usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
13578 -									urb);
13579 -
13580 -		usb_hcd_unlink_urb_from_ep(hcd, urb);
13581 -
13582 -		spin_unlock_irqrestore(&the_controller->lock, flags);
13583 -		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
13584 -								urb->status);
13585 -		spin_lock_irqsave(&the_controller->lock, flags);
13586 -	}
13587 -
13588  	spin_unlock_irqrestore(&the_controller->lock, flags);
13589  
13590  	usbip_dbg_vhci_hc("leave\n");
13591 diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
13592 index 722c840..7e89383 100644
13593 --- a/drivers/staging/zram/zram_drv.c
13594 +++ b/drivers/staging/zram/zram_drv.c
13595 @@ -235,6 +235,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
13596  
13597  		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
13598  			handle_zero_page(page);
13599 +			index++;
13600  			continue;
13601  		}
13602  
13603 @@ -243,12 +244,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
13604  			pr_debug("Read before write: sector=%lu, size=%u",
13605  				(ulong)(bio->bi_sector), bio->bi_size);
13606  			/* Do nothing */
13607 +			index++;
13608  			continue;
13609  		}
13610  
13611  		/* Page is stored uncompressed since it's incompressible */
13612  		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
13613  			handle_uncompressed_page(zram, page, index);
13614 +			index++;
13615  			continue;
13616  		}
13617  
13618 @@ -324,6 +327,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
13619  			mutex_unlock(&zram->lock);
13620  			zram_stat_inc(&zram->stats.pages_zero);
13621  			zram_set_flag(zram, index, ZRAM_ZERO);
13622 +			index++;
13623  			continue;
13624  		}
13625  
13626 diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
13627 index ea071a5..44447f5 100644
13628 --- a/drivers/usb/atm/ueagle-atm.c
13629 +++ b/drivers/usb/atm/ueagle-atm.c
13630 @@ -2301,7 +2301,7 @@ out:
13631  	return ret;
13632  }
13633  
13634 -static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
13635 +static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
13636  
13637  static ssize_t read_human_status(struct device *dev,
13638  			struct device_attribute *attr, char *buf)
13639 @@ -2364,8 +2364,7 @@ out:
13640  	return ret;
13641  }
13642  
13643 -static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO,
13644 -				read_human_status, NULL);
13645 +static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
13646  
13647  static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
13648  		char *buf)
13649 @@ -2397,7 +2396,7 @@ out:
13650  	return ret;
13651  }
13652  
13653 -static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
13654 +static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
13655  
13656  #define UEA_ATTR(name, reset)					\
13657  								\
13658 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
13659 index bc62fae..3ffa434 100644
13660 --- a/drivers/usb/class/cdc-acm.c
13661 +++ b/drivers/usb/class/cdc-acm.c
13662 @@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
13663  	{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
13664  	{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
13665  	{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
13666 +	{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
13667  	{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
13668  
13669  	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
13670 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
13671 index f1aaff6..045bb4b 100644
13672 --- a/drivers/usb/core/devio.c
13673 +++ b/drivers/usb/core/devio.c
13674 @@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
13675  
13676  static int proc_connectinfo(struct dev_state *ps, void __user *arg)
13677  {
13678 -	struct usbdevfs_connectinfo ci;
13679 +	struct usbdevfs_connectinfo ci = {
13680 +		.devnum = ps->dev->devnum,
13681 +		.slow = ps->dev->speed == USB_SPEED_LOW
13682 +	};
13683  
13684 -	ci.devnum = ps->dev->devnum;
13685 -	ci.slow = ps->dev->speed == USB_SPEED_LOW;
13686  	if (copy_to_user(arg, &ci, sizeof(ci)))
13687  		return -EFAULT;
13688  	return 0;
13689 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
13690 index 5cca00a..b5c965c 100644
13691 --- a/drivers/usb/core/hcd.c
13692 +++ b/drivers/usb/core/hcd.c
13693 @@ -1945,7 +1945,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
13694  
13695  	dev_dbg(&rhdev->dev, "usb %s%s\n",
13696  			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
13697 -	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
13698  	if (!hcd->driver->bus_resume)
13699  		return -ENOENT;
13700  	if (hcd->state == HC_STATE_RUNNING)
13701 @@ -1953,6 +1952,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
13702  
13703  	hcd->state = HC_STATE_RESUMING;
13704  	status = hcd->driver->bus_resume(hcd);
13705 +	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
13706  	if (status == 0) {
13707  		/* TRSMRCY = 10 msec */
13708  		msleep(10);
13709 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
13710 index 84c18971..6c16c4f 100644
13711 --- a/drivers/usb/core/hub.c
13712 +++ b/drivers/usb/core/hub.c
13713 @@ -677,6 +677,8 @@ static void hub_init_func3(struct work_struct *ws);
13714  static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
13715  {
13716  	struct usb_device *hdev = hub->hdev;
13717 +	struct usb_hcd *hcd;
13718 +	int ret;
13719  	int port1;
13720  	int status;
13721  	bool need_debounce_delay = false;
13722 @@ -715,6 +717,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
13723  			usb_autopm_get_interface_no_resume(
13724  					to_usb_interface(hub->intfdev));
13725  			return;		/* Continues at init2: below */
13726 +		} else if (type == HUB_RESET_RESUME) {
13727 +			/* The internal host controller state for the hub device
13728 +			 * may be gone after a host power loss on system resume.
13729 +			 * Update the device's info so the HW knows it's a hub.
13730 +			 */
13731 +			hcd = bus_to_hcd(hdev->bus);
13732 +			if (hcd->driver->update_hub_device) {
13733 +				ret = hcd->driver->update_hub_device(hcd, hdev,
13734 +						&hub->tt, GFP_NOIO);
13735 +				if (ret < 0) {
13736 +					dev_err(hub->intfdev, "Host not "
13737 +							"accepting hub info "
13738 +							"update.\n");
13739 +					dev_err(hub->intfdev, "LS/FS devices "
13740 +							"and hubs may not work "
13741 +							"under this hub\n.");
13742 +				}
13743 +			}
13744 +			hub_power_on(hub, true);
13745  		} else {
13746  			hub_power_on(hub, true);
13747  		}
13748 @@ -2722,6 +2743,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
13749  		udev->ttport = hdev->ttport;
13750  	} else if (udev->speed != USB_SPEED_HIGH
13751  			&& hdev->speed == USB_SPEED_HIGH) {
13752 +		if (!hub->tt.hub) {
13753 +			dev_err(&udev->dev, "parent hub has no TT\n");
13754 +			retval = -EINVAL;
13755 +			goto fail;
13756 +		}
13757  		udev->tt = &hub->tt;
13758  		udev->ttport = port1;
13759  	}
13760 @@ -2860,13 +2886,16 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
13761  	else
13762  		i = udev->descriptor.bMaxPacketSize0;
13763  	if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
13764 -		if (udev->speed != USB_SPEED_FULL ||
13765 +		if (udev->speed == USB_SPEED_LOW ||
13766  				!(i == 8 || i == 16 || i == 32 || i == 64)) {
13767 -			dev_err(&udev->dev, "ep0 maxpacket = %d\n", i);
13768 +			dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
13769  			retval = -EMSGSIZE;
13770  			goto fail;
13771  		}
13772 -		dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
13773 +		if (udev->speed == USB_SPEED_FULL)
13774 +			dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
13775 +		else
13776 +			dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
13777  		udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
13778  		usb_ep0_reinit(udev);
13779  	}
13780 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
13781 index 9f0ce7d..d6e3e41 100644
13782 --- a/drivers/usb/core/message.c
13783 +++ b/drivers/usb/core/message.c
13784 @@ -1140,13 +1140,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
13785  {
13786  	int i;
13787  
13788 -	dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
13789 -		skip_ep0 ? "non-ep0" : "all");
13790 -	for (i = skip_ep0; i < 16; ++i) {
13791 -		usb_disable_endpoint(dev, i, true);
13792 -		usb_disable_endpoint(dev, i + USB_DIR_IN, true);
13793 -	}
13794 -
13795  	/* getting rid of interfaces will disconnect
13796  	 * any drivers bound to them (a key side effect)
13797  	 */
13798 @@ -1176,6 +1169,13 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
13799  		if (dev->state == USB_STATE_CONFIGURED)
13800  			usb_set_device_state(dev, USB_STATE_ADDRESS);
13801  	}
13802 +
13803 +	dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
13804 +		skip_ep0 ? "non-ep0" : "all");
13805 +	for (i = skip_ep0; i < 16; ++i) {
13806 +		usb_disable_endpoint(dev, i, true);
13807 +		usb_disable_endpoint(dev, i + USB_DIR_IN, true);
13808 +	}
13809  }
13810  
13811  /**
13812 diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
13813 index d623c7b..2d19d88 100644
13814 --- a/drivers/usb/gadget/atmel_usba_udc.c
13815 +++ b/drivers/usb/gadget/atmel_usba_udc.c
13816 @@ -2014,6 +2014,9 @@ static int __init usba_udc_probe(struct platform_device *pdev)
13817  			} else {
13818  				disable_irq(gpio_to_irq(udc->vbus_pin));
13819  			}
13820 +		} else {
13821 +			/* gpio_request fail so use -EINVAL for gpio_is_valid */
13822 +			udc->vbus_pin = -EINVAL;
13823  		}
13824  	}
13825  
13826 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
13827 index 1160c55..67746b1 100644
13828 --- a/drivers/usb/gadget/composite.c
13829 +++ b/drivers/usb/gadget/composite.c
13830 @@ -901,7 +901,8 @@ unknown:
13831  		 */
13832  		switch (ctrl->bRequestType & USB_RECIP_MASK) {
13833  		case USB_RECIP_INTERFACE:
13834 -			f = cdev->config->interface[intf];
13835 +			if (cdev->config)
13836 +				f = cdev->config->interface[intf];
13837  			break;
13838  
13839  		case USB_RECIP_ENDPOINT:
13840 @@ -1082,14 +1083,6 @@ static int composite_bind(struct usb_gadget *gadget)
13841  	 */
13842  	usb_ep_autoconfig_reset(cdev->gadget);
13843  
13844 -	/* standardized runtime overrides for device ID data */
13845 -	if (idVendor)
13846 -		cdev->desc.idVendor = cpu_to_le16(idVendor);
13847 -	if (idProduct)
13848 -		cdev->desc.idProduct = cpu_to_le16(idProduct);
13849 -	if (bcdDevice)
13850 -		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
13851 -
13852  	/* composite gadget needs to assign strings for whole device (like
13853  	 * serial number), register function drivers, potentially update
13854  	 * power state and consumption, etc
13855 @@ -1101,6 +1094,14 @@ static int composite_bind(struct usb_gadget *gadget)
13856  	cdev->desc = *composite->dev;
13857  	cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
13858  
13859 +	/* standardized runtime overrides for device ID data */
13860 +	if (idVendor)
13861 +		cdev->desc.idVendor = cpu_to_le16(idVendor);
13862 +	if (idProduct)
13863 +		cdev->desc.idProduct = cpu_to_le16(idProduct);
13864 +	if (bcdDevice)
13865 +		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
13866 +
13867  	/* strings can't be assigned before bind() allocates the
13868  	 * releavnt identifiers
13869  	 */
13870 diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
13871 index d47a123..bd6226c 100644
13872 --- a/drivers/usb/gadget/f_acm.c
13873 +++ b/drivers/usb/gadget/f_acm.c
13874 @@ -111,7 +111,7 @@ acm_iad_descriptor = {
13875  	.bInterfaceCount = 	2,	// control + data
13876  	.bFunctionClass =	USB_CLASS_COMM,
13877  	.bFunctionSubClass =	USB_CDC_SUBCLASS_ACM,
13878 -	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
13879 +	.bFunctionProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
13880  	/* .iFunction =		DYNAMIC */
13881  };
13882  
13883 diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
13884 index a9474f8..3c2f0a4 100644
13885 --- a/drivers/usb/gadget/g_ffs.c
13886 +++ b/drivers/usb/gadget/g_ffs.c
13887 @@ -53,8 +53,8 @@ MODULE_AUTHOR("Michal Nazarewicz");
13888  MODULE_LICENSE("GPL");
13889  
13890  
13891 -static unsigned short gfs_vendor_id    = 0x0525;	/* XXX NetChip */
13892 -static unsigned short gfs_product_id   = 0xa4ac;	/* XXX */
13893 +static unsigned short gfs_vendor_id    = 0x1d6b;	/* Linux Foundation */
13894 +static unsigned short gfs_product_id   = 0x0105;	/* FunctionFS Gadget */
13895  
13896  static struct usb_device_descriptor gfs_dev_desc = {
13897  	.bLength		= sizeof gfs_dev_desc,
13898 diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
13899 index 795d762..36d67a3 100644
13900 --- a/drivers/usb/gadget/multi.c
13901 +++ b/drivers/usb/gadget/multi.c
13902 @@ -74,8 +74,8 @@ MODULE_LICENSE("GPL");
13903  
13904  /***************************** Device Descriptor ****************************/
13905  
13906 -#define MULTI_VENDOR_NUM	0x0525	/* XXX NetChip */
13907 -#define MULTI_PRODUCT_NUM	0xa4ab	/* XXX */
13908 +#define MULTI_VENDOR_NUM	0x1d6b	/* Linux Foundation */
13909 +#define MULTI_PRODUCT_NUM	0x0104	/* Multifunction Composite Gadget */
13910  
13911  
13912  enum {
13913 diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
13914 index cf241c3..15222eb 100644
13915 --- a/drivers/usb/gadget/printer.c
13916 +++ b/drivers/usb/gadget/printer.c
13917 @@ -131,31 +131,31 @@ static struct printer_dev usb_printer_gadget;
13918   * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
13919   */
13920  
13921 -static ushort __initdata idVendor;
13922 +static ushort idVendor;
13923  module_param(idVendor, ushort, S_IRUGO);
13924  MODULE_PARM_DESC(idVendor, "USB Vendor ID");
13925  
13926 -static ushort __initdata idProduct;
13927 +static ushort idProduct;
13928  module_param(idProduct, ushort, S_IRUGO);
13929  MODULE_PARM_DESC(idProduct, "USB Product ID");
13930  
13931 -static ushort __initdata bcdDevice;
13932 +static ushort bcdDevice;
13933  module_param(bcdDevice, ushort, S_IRUGO);
13934  MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
13935  
13936 -static char *__initdata iManufacturer;
13937 +static char *iManufacturer;
13938  module_param(iManufacturer, charp, S_IRUGO);
13939  MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
13940  
13941 -static char *__initdata iProduct;
13942 +static char *iProduct;
13943  module_param(iProduct, charp, S_IRUGO);
13944  MODULE_PARM_DESC(iProduct, "USB Product string");
13945  
13946 -static char *__initdata iSerialNum;
13947 +static char *iSerialNum;
13948  module_param(iSerialNum, charp, S_IRUGO);
13949  MODULE_PARM_DESC(iSerialNum, "1");
13950  
13951 -static char *__initdata iPNPstring;
13952 +static char *iPNPstring;
13953  module_param(iPNPstring, charp, S_IRUGO);
13954  MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
13955  
13956 @@ -1596,13 +1596,12 @@ cleanup(void)
13957  	int status;
13958  
13959  	mutex_lock(&usb_printer_gadget.lock_printer_io);
13960 -	class_destroy(usb_gadget_class);
13961 -	unregister_chrdev_region(g_printer_devno, 2);
13962 -
13963  	status = usb_gadget_unregister_driver(&printer_driver);
13964  	if (status)
13965  		ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
13966  
13967 +	unregister_chrdev_region(g_printer_devno, 2);
13968 +	class_destroy(usb_gadget_class);
13969  	mutex_unlock(&usb_printer_gadget.lock_printer_io);
13970  }
13971  module_exit(cleanup);
13972 diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
13973 index 2baf8a8..a869e3c 100644
13974 --- a/drivers/usb/host/ehci-au1xxx.c
13975 +++ b/drivers/usb/host/ehci-au1xxx.c
13976 @@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
13977  	 * mark HW unaccessible.  The PM and USB cores make sure that
13978  	 * the root hub is either suspended or stopped.
13979  	 */
13980 -	spin_lock_irqsave(&ehci->lock, flags);
13981  	ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
13982 +	spin_lock_irqsave(&ehci->lock, flags);
13983  	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
13984  	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
13985  
13986 diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
13987 index 76b7fd2..b349021 100644
13988 --- a/drivers/usb/host/ehci-dbg.c
13989 +++ b/drivers/usb/host/ehci-dbg.c
13990 @@ -1063,7 +1063,7 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
13991  						    &debug_registers_fops))
13992  		goto file_error;
13993  
13994 -	if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus,
13995 +	if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus,
13996  						    &debug_lpm_fops))
13997  		goto file_error;
13998  
13999 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
14000 index 34a928d..597ed10 100644
14001 --- a/drivers/usb/host/ehci-hcd.c
14002 +++ b/drivers/usb/host/ehci-hcd.c
14003 @@ -114,6 +114,9 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
14004  
14005  #define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
14006  
14007 +/* for ASPM quirk of ISOC on AMD SB800 */
14008 +static struct pci_dev *amd_nb_dev;
14009 +
14010  /*-------------------------------------------------------------------------*/
14011  
14012  #include "ehci.h"
14013 @@ -514,6 +517,11 @@ static void ehci_stop (struct usb_hcd *hcd)
14014  	spin_unlock_irq (&ehci->lock);
14015  	ehci_mem_cleanup (ehci);
14016  
14017 +	if (amd_nb_dev) {
14018 +		pci_dev_put(amd_nb_dev);
14019 +		amd_nb_dev = NULL;
14020 +	}
14021 +
14022  #ifdef	EHCI_STATS
14023  	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
14024  		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
14025 @@ -549,6 +557,8 @@ static int ehci_init(struct usb_hcd *hcd)
14026  	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
14027  	ehci->iaa_watchdog.data = (unsigned long) ehci;
14028  
14029 +	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
14030 +
14031  	/*
14032  	 * hw default: 1K periodic list heads, one per frame.
14033  	 * periodic_size can shrink by USBCMD update if hcc_params allows.
14034 @@ -556,11 +566,20 @@ static int ehci_init(struct usb_hcd *hcd)
14035  	ehci->periodic_size = DEFAULT_I_TDPS;
14036  	INIT_LIST_HEAD(&ehci->cached_itd_list);
14037  	INIT_LIST_HEAD(&ehci->cached_sitd_list);
14038 +
14039 +	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
14040 +		/* periodic schedule size can be smaller than default */
14041 +		switch (EHCI_TUNE_FLS) {
14042 +		case 0: ehci->periodic_size = 1024; break;
14043 +		case 1: ehci->periodic_size = 512; break;
14044 +		case 2: ehci->periodic_size = 256; break;
14045 +		default:	BUG();
14046 +		}
14047 +	}
14048  	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
14049  		return retval;
14050  
14051  	/* controllers may cache some of the periodic schedule ... */
14052 -	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
14053  	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
14054  		ehci->i_thresh = 2 + 8;
14055  	else					// N microframes cached
14056 @@ -614,12 +633,6 @@ static int ehci_init(struct usb_hcd *hcd)
14057  		/* periodic schedule size can be smaller than default */
14058  		temp &= ~(3 << 2);
14059  		temp |= (EHCI_TUNE_FLS << 2);
14060 -		switch (EHCI_TUNE_FLS) {
14061 -		case 0: ehci->periodic_size = 1024; break;
14062 -		case 1: ehci->periodic_size = 512; break;
14063 -		case 2: ehci->periodic_size = 256; break;
14064 -		default:	BUG();
14065 -		}
14066  	}
14067  	if (HCC_LPM(hcc_params)) {
14068  		/* support link power management EHCI 1.1 addendum */
14069 @@ -1048,10 +1061,11 @@ rescan:
14070  				tmp && tmp != qh;
14071  				tmp = tmp->qh_next.qh)
14072  			continue;
14073 -		/* periodic qh self-unlinks on empty */
14074 -		if (!tmp)
14075 -			goto nogood;
14076 -		unlink_async (ehci, qh);
14077 +		/* periodic qh self-unlinks on empty, and a COMPLETING qh
14078 +		 * may already be unlinked.
14079 +		 */
14080 +		if (tmp)
14081 +			unlink_async(ehci, qh);
14082  		/* FALL THROUGH */
14083  	case QH_STATE_UNLINK:		/* wait for hw to finish? */
14084  	case QH_STATE_UNLINK_WAIT:
14085 @@ -1068,7 +1082,6 @@ idle_timeout:
14086  		}
14087  		/* else FALL THROUGH */
14088  	default:
14089 -nogood:
14090  		/* caller was supposed to have unlinked any requests;
14091  		 * that's not our job.  just leak this memory.
14092  		 */
14093 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
14094 index 796ea0c..8a515f0 100644
14095 --- a/drivers/usb/host/ehci-hub.c
14096 +++ b/drivers/usb/host/ehci-hub.c
14097 @@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
14098  {
14099  	int		port;
14100  	u32		temp;
14101 +	unsigned long	flags;
14102  
14103  	/* If remote wakeup is enabled for the root hub but disabled
14104  	 * for the controller, we must adjust all the port wakeup flags
14105 @@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
14106  	if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
14107  		return;
14108  
14109 +	spin_lock_irqsave(&ehci->lock, flags);
14110 +
14111  	/* clear phy low-power mode before changing wakeup flags */
14112  	if (ehci->has_hostpc) {
14113  		port = HCS_N_PORTS(ehci->hcs_params);
14114 @@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
14115  			temp = ehci_readl(ehci, hostpc_reg);
14116  			ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
14117  		}
14118 +		spin_unlock_irqrestore(&ehci->lock, flags);
14119  		msleep(5);
14120 +		spin_lock_irqsave(&ehci->lock, flags);
14121  	}
14122  
14123  	port = HCS_N_PORTS(ehci->hcs_params);
14124 @@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
14125  	/* Does the root hub have a port wakeup pending? */
14126  	if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
14127  		usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
14128 +
14129 +	spin_unlock_irqrestore(&ehci->lock, flags);
14130  }
14131  
14132  static int ehci_bus_suspend (struct usb_hcd *hcd)
14133 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
14134 index a1e8d27..566791e 100644
14135 --- a/drivers/usb/host/ehci-pci.c
14136 +++ b/drivers/usb/host/ehci-pci.c
14137 @@ -41,6 +41,42 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
14138  	return 0;
14139  }
14140  
14141 +static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
14142 +{
14143 +	struct pci_dev *amd_smbus_dev;
14144 +	u8 rev = 0;
14145 +
14146 +	amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
14147 +	if (amd_smbus_dev) {
14148 +		pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
14149 +		if (rev < 0x40) {
14150 +			pci_dev_put(amd_smbus_dev);
14151 +			amd_smbus_dev = NULL;
14152 +			return 0;
14153 +		}
14154 +	} else {
14155 +		amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
14156 +		if (!amd_smbus_dev)
14157 +			return 0;
14158 +		pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
14159 +		if (rev < 0x11 || rev > 0x18) {
14160 +			pci_dev_put(amd_smbus_dev);
14161 +			amd_smbus_dev = NULL;
14162 +			return 0;
14163 +		}
14164 +	}
14165 +
14166 +	if (!amd_nb_dev)
14167 +		amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
14168 +
14169 +	ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
14170 +
14171 +	pci_dev_put(amd_smbus_dev);
14172 +	amd_smbus_dev = NULL;
14173 +
14174 +	return 1;
14175 +}
14176 +
14177  /* called during probe() after chip reset completes */
14178  static int ehci_pci_setup(struct usb_hcd *hcd)
14179  {
14180 @@ -99,6 +135,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
14181  	/* cache this readonly data; minimize chip reads */
14182  	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
14183  
14184 +	if (ehci_quirk_amd_hudson(ehci))
14185 +		ehci->amd_l1_fix = 1;
14186 +
14187  	retval = ehci_halt(ehci);
14188  	if (retval)
14189  		return retval;
14190 @@ -148,6 +187,18 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
14191  			if (pdev->revision < 0xa4)
14192  				ehci->no_selective_suspend = 1;
14193  			break;
14194 +
14195 +		/* MCP89 chips on the MacBookAir3,1 give EPROTO when
14196 +		 * fetching device descriptors unless LPM is disabled.
14197 +		 * There are also intermittent problems enumerating
14198 +		 * devices with PPCD enabled.
14199 +		 */
14200 +		case 0x0d9d:
14201 +			ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
14202 +			ehci->has_lpm = 0;
14203 +			ehci->has_ppcd = 0;
14204 +			ehci->command &= ~CMD_PPCEE;
14205 +			break;
14206  		}
14207  		break;
14208  	case PCI_VENDOR_ID_VIA:
14209 @@ -296,8 +347,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
14210  	 * mark HW unaccessible.  The PM and USB cores make sure that
14211  	 * the root hub is either suspended or stopped.
14212  	 */
14213 -	spin_lock_irqsave (&ehci->lock, flags);
14214  	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
14215 +	spin_lock_irqsave (&ehci->lock, flags);
14216  	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
14217  	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
14218  
14219 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
14220 index a92526d..724ba71 100644
14221 --- a/drivers/usb/host/ehci-sched.c
14222 +++ b/drivers/usb/host/ehci-sched.c
14223 @@ -1583,6 +1583,63 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
14224  	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
14225  }
14226  
14227 +#define AB_REG_BAR_LOW 0xe0
14228 +#define AB_REG_BAR_HIGH 0xe1
14229 +#define AB_INDX(addr) ((addr) + 0x00)
14230 +#define AB_DATA(addr) ((addr) + 0x04)
14231 +#define NB_PCIE_INDX_ADDR 0xe0
14232 +#define NB_PCIE_INDX_DATA 0xe4
14233 +#define NB_PIF0_PWRDOWN_0 0x01100012
14234 +#define NB_PIF0_PWRDOWN_1 0x01100013
14235 +
14236 +static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
14237 +{
14238 +	u32 addr, addr_low, addr_high, val;
14239 +
14240 +	outb_p(AB_REG_BAR_LOW, 0xcd6);
14241 +	addr_low = inb_p(0xcd7);
14242 +	outb_p(AB_REG_BAR_HIGH, 0xcd6);
14243 +	addr_high = inb_p(0xcd7);
14244 +	addr = addr_high << 8 | addr_low;
14245 +	outl_p(0x30, AB_INDX(addr));
14246 +	outl_p(0x40, AB_DATA(addr));
14247 +	outl_p(0x34, AB_INDX(addr));
14248 +	val = inl_p(AB_DATA(addr));
14249 +
14250 +	if (disable) {
14251 +		val &= ~0x8;
14252 +		val |= (1 << 4) | (1 << 9);
14253 +	} else {
14254 +		val |= 0x8;
14255 +		val &= ~((1 << 4) | (1 << 9));
14256 +	}
14257 +	outl_p(val, AB_DATA(addr));
14258 +
14259 +	if (amd_nb_dev) {
14260 +		addr = NB_PIF0_PWRDOWN_0;
14261 +		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
14262 +		pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
14263 +		if (disable)
14264 +			val &= ~(0x3f << 7);
14265 +		else
14266 +			val |= 0x3f << 7;
14267 +
14268 +		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
14269 +
14270 +		addr = NB_PIF0_PWRDOWN_1;
14271 +		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
14272 +		pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
14273 +		if (disable)
14274 +			val &= ~(0x3f << 7);
14275 +		else
14276 +			val |= 0x3f << 7;
14277 +
14278 +		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
14279 +	}
14280 +
14281 +	return;
14282 +}
14283 +
14284  /* fit urb's itds into the selected schedule slot; activate as needed */
14285  static int
14286  itd_link_urb (
14287 @@ -1609,6 +1666,12 @@ itd_link_urb (
14288  			urb->interval,
14289  			next_uframe >> 3, next_uframe & 0x7);
14290  	}
14291 +
14292 +	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
14293 +		if (ehci->amd_l1_fix == 1)
14294 +			ehci_quirk_amd_L1(ehci, 1);
14295 +	}
14296 +
14297  	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
14298  
14299  	/* fill iTDs uframe by uframe */
14300 @@ -1733,6 +1796,11 @@ itd_complete (
14301  	(void) disable_periodic(ehci);
14302  	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
14303  
14304 +	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
14305 +		if (ehci->amd_l1_fix == 1)
14306 +			ehci_quirk_amd_L1(ehci, 0);
14307 +	}
14308 +
14309  	if (unlikely(list_is_singular(&stream->td_list))) {
14310  		ehci_to_hcd(ehci)->self.bandwidth_allocated
14311  				-= stream->bandwidth;
14312 @@ -2018,6 +2086,12 @@ sitd_link_urb (
14313  			(next_uframe >> 3) & (ehci->periodic_size - 1),
14314  			stream->interval, hc32_to_cpu(ehci, stream->splits));
14315  	}
14316 +
14317 +	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
14318 +		if (ehci->amd_l1_fix == 1)
14319 +			ehci_quirk_amd_L1(ehci, 1);
14320 +	}
14321 +
14322  	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
14323  
14324  	/* fill sITDs frame by frame */
14325 @@ -2118,6 +2192,11 @@ sitd_complete (
14326  	(void) disable_periodic(ehci);
14327  	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
14328  
14329 +	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
14330 +		if (ehci->amd_l1_fix == 1)
14331 +			ehci_quirk_amd_L1(ehci, 0);
14332 +	}
14333 +
14334  	if (list_is_singular(&stream->td_list)) {
14335  		ehci_to_hcd(ehci)->self.bandwidth_allocated
14336  				-= stream->bandwidth;
14337 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
14338 index bde823f..fd1c53d 100644
14339 --- a/drivers/usb/host/ehci.h
14340 +++ b/drivers/usb/host/ehci.h
14341 @@ -130,6 +130,7 @@ struct ehci_hcd {			/* one per controller */
14342  	unsigned		has_amcc_usb23:1;
14343  	unsigned		need_io_watchdog:1;
14344  	unsigned		broken_periodic:1;
14345 +	unsigned		amd_l1_fix:1;
14346  	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
14347  
14348  	/* required for usb32 quirk */
14349 diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
14350 index 10e1872..931d588 100644
14351 --- a/drivers/usb/host/ohci-jz4740.c
14352 +++ b/drivers/usb/host/ohci-jz4740.c
14353 @@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = {
14354  	},
14355  };
14356  
14357 -MODULE_ALIAS("platfrom:jz4740-ohci");
14358 +MODULE_ALIAS("platform:jz4740-ohci");
14359 diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
14360 index 95d0f5a..25563e9 100644
14361 --- a/drivers/usb/host/r8a66597.h
14362 +++ b/drivers/usb/host/r8a66597.h
14363 @@ -227,7 +227,7 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
14364  		int odd = len & 0x0001;
14365  
14366  		len = len / 2;
14367 -		ioread16_rep(fifoaddr, buf, len);
14368 +		iowrite16_rep(fifoaddr, buf, len);
14369  		if (unlikely(odd)) {
14370  			buf = &buf[len];
14371  			iowrite8((unsigned char)*buf, fifoaddr);
14372 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
14373 index a1a7a97..480936a 100644
14374 --- a/drivers/usb/host/xhci-hub.c
14375 +++ b/drivers/usb/host/xhci-hub.c
14376 @@ -132,6 +132,13 @@ static u32 xhci_port_state_to_neutral(u32 state)
14377  static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
14378  		u32 __iomem *addr, u32 port_status)
14379  {
14380 +	/* Don't allow the USB core to disable SuperSpeed ports. */
14381 +	if (xhci->port_array[wIndex] == 0x03) {
14382 +		xhci_dbg(xhci, "Ignoring request to disable "
14383 +				"SuperSpeed port.\n");
14384 +		return;
14385 +	}
14386 +
14387  	/* Write 1 to disable the port */
14388  	xhci_writel(xhci, port_status | PORT_PE, addr);
14389  	port_status = xhci_readl(xhci, addr);
14390 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
14391 index 4e51343..6627a95 100644
14392 --- a/drivers/usb/host/xhci-mem.c
14393 +++ b/drivers/usb/host/xhci-mem.c
14394 @@ -1043,7 +1043,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
14395  	if (udev->speed == USB_SPEED_SUPER)
14396  		return ep->ss_ep_comp.wBytesPerInterval;
14397  
14398 -	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
14399 +	max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
14400  	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
14401  	/* A 0 in max burst means 1 transfer per ESIT */
14402  	return max_packet * (max_burst + 1);
14403 @@ -1133,7 +1133,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
14404  		/* Fall through */
14405  	case USB_SPEED_FULL:
14406  	case USB_SPEED_LOW:
14407 -		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
14408 +		max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
14409  		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
14410  		break;
14411  	default:
14412 @@ -1441,6 +1441,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
14413  	xhci->dcbaa = NULL;
14414  
14415  	scratchpad_free(xhci);
14416 +
14417 +	xhci->num_usb2_ports = 0;
14418 +	xhci->num_usb3_ports = 0;
14419 +	kfree(xhci->usb2_ports);
14420 +	kfree(xhci->usb3_ports);
14421 +	kfree(xhci->port_array);
14422 +
14423  	xhci->page_size = 0;
14424  	xhci->page_shift = 0;
14425  }
14426 @@ -1624,6 +1631,166 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
14427  			&xhci->ir_set->erst_dequeue);
14428  }
14429  
14430 +static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
14431 +		u32 __iomem *addr, u8 major_revision)
14432 +{
14433 +	u32 temp, port_offset, port_count;
14434 +	int i;
14435 +
14436 +	if (major_revision > 0x03) {
14437 +		xhci_warn(xhci, "Ignoring unknown port speed, "
14438 +				"Ext Cap %p, revision = 0x%x\n",
14439 +				addr, major_revision);
14440 +		/* Ignoring port protocol we can't understand. FIXME */
14441 +		return;
14442 +	}
14443 +
14444 +	/* Port offset and count in the third dword, see section 7.2 */
14445 +	temp = xhci_readl(xhci, addr + 2);
14446 +	port_offset = XHCI_EXT_PORT_OFF(temp);
14447 +	port_count = XHCI_EXT_PORT_COUNT(temp);
14448 +	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
14449 +			"count = %u, revision = 0x%x\n",
14450 +			addr, port_offset, port_count, major_revision);
14451 +	/* Port count includes the current port offset */
14452 +	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
14453 +		/* WTF? "Valid values are ‘1’ to MaxPorts" */
14454 +		return;
14455 +	port_offset--;
14456 +	for (i = port_offset; i < (port_offset + port_count); i++) {
14457 +		/* Duplicate entry.  Ignore the port if the revisions differ. */
14458 +		if (xhci->port_array[i] != 0) {
14459 +			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
14460 +					" port %u\n", addr, i);
14461 +			xhci_warn(xhci, "Port was marked as USB %u, "
14462 +					"duplicated as USB %u\n",
14463 +					xhci->port_array[i], major_revision);
14464 +			/* Only adjust the roothub port counts if we haven't
14465 +			 * found a similar duplicate.
14466 +			 */
14467 +			if (xhci->port_array[i] != major_revision &&
14468 +				xhci->port_array[i] != (u8) -1) {
14469 +				if (xhci->port_array[i] == 0x03)
14470 +					xhci->num_usb3_ports--;
14471 +				else
14472 +					xhci->num_usb2_ports--;
14473 +				xhci->port_array[i] = (u8) -1;
14474 +			}
14475 +			/* FIXME: Should we disable the port? */
14476 +			continue;
14477 +		}
14478 +		xhci->port_array[i] = major_revision;
14479 +		if (major_revision == 0x03)
14480 +			xhci->num_usb3_ports++;
14481 +		else
14482 +			xhci->num_usb2_ports++;
14483 +	}
14484 +	/* FIXME: Should we disable ports not in the Extended Capabilities? */
14485 +}
14486 +
14487 +/*
14488 + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
14489 + * specify what speeds each port is supposed to be.  We can't count on the port
14490 + * speed bits in the PORTSC register being correct until a device is connected,
14491 + * but we need to set up the two fake roothubs with the correct number of USB
14492 + * 3.0 and USB 2.0 ports at host controller initialization time.
14493 + */
14494 +static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
14495 +{
14496 +	u32 __iomem *addr;
14497 +	u32 offset;
14498 +	unsigned int num_ports;
14499 +	int i, port_index;
14500 +
14501 +	addr = &xhci->cap_regs->hcc_params;
14502 +	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
14503 +	if (offset == 0) {
14504 +		xhci_err(xhci, "No Extended Capability registers, "
14505 +				"unable to set up roothub.\n");
14506 +		return -ENODEV;
14507 +	}
14508 +
14509 +	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
14510 +	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
14511 +	if (!xhci->port_array)
14512 +		return -ENOMEM;
14513 +
14514 +	/*
14515 +	 * For whatever reason, the first capability offset is from the
14516 +	 * capability register base, not from the HCCPARAMS register.
14517 +	 * See section 5.3.6 for offset calculation.
14518 +	 */
14519 +	addr = &xhci->cap_regs->hc_capbase + offset;
14520 +	while (1) {
14521 +		u32 cap_id;
14522 +
14523 +		cap_id = xhci_readl(xhci, addr);
14524 +		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
14525 +			xhci_add_in_port(xhci, num_ports, addr,
14526 +					(u8) XHCI_EXT_PORT_MAJOR(cap_id));
14527 +		offset = XHCI_EXT_CAPS_NEXT(cap_id);
14528 +		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
14529 +				== num_ports)
14530 +			break;
14531 +		/*
14532 +		 * Once you're into the Extended Capabilities, the offset is
14533 +		 * always relative to the register holding the offset.
14534 +		 */
14535 +		addr += offset;
14536 +	}
14537 +
14538 +	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
14539 +		xhci_warn(xhci, "No ports on the roothubs?\n");
14540 +		return -ENODEV;
14541 +	}
14542 +	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
14543 +			xhci->num_usb2_ports, xhci->num_usb3_ports);
14544 +	/*
14545 +	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
14546 +	 * Not sure how the USB core will handle a hub with no ports...
14547 +	 */
14548 +	if (xhci->num_usb2_ports) {
14549 +		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
14550 +				xhci->num_usb2_ports, flags);
14551 +		if (!xhci->usb2_ports)
14552 +			return -ENOMEM;
14553 +
14554 +		port_index = 0;
14555 +		for (i = 0; i < num_ports; i++) {
14556 +			if (xhci->port_array[i] == 0x03 ||
14557 +					xhci->port_array[i] == 0 ||
14558 +					xhci->port_array[i] == -1)
14559 +				continue;
14560 +
14561 +			xhci->usb2_ports[port_index] =
14562 +				&xhci->op_regs->port_status_base +
14563 +				NUM_PORT_REGS*i;
14564 +			xhci_dbg(xhci, "USB 2.0 port at index %u, "
14565 +					"addr = %p\n", i,
14566 +					xhci->usb2_ports[port_index]);
14567 +			port_index++;
14568 +		}
14569 +	}
14570 +	if (xhci->num_usb3_ports) {
14571 +		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
14572 +				xhci->num_usb3_ports, flags);
14573 +		if (!xhci->usb3_ports)
14574 +			return -ENOMEM;
14575 +
14576 +		port_index = 0;
14577 +		for (i = 0; i < num_ports; i++)
14578 +			if (xhci->port_array[i] == 0x03) {
14579 +				xhci->usb3_ports[port_index] =
14580 +					&xhci->op_regs->port_status_base +
14581 +					NUM_PORT_REGS*i;
14582 +				xhci_dbg(xhci, "USB 3.0 port at index %u, "
14583 +						"addr = %p\n", i,
14584 +						xhci->usb3_ports[port_index]);
14585 +				port_index++;
14586 +			}
14587 +	}
14588 +	return 0;
14589 +}
14590  
14591  int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
14592  {
14593 @@ -1804,6 +1971,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
14594  
14595  	if (scratchpad_alloc(xhci, flags))
14596  		goto fail;
14597 +	if (xhci_setup_port_arrays(xhci, flags))
14598 +		goto fail;
14599  
14600  	return 0;
14601  
14602 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
14603 index 48e60d1..e7547d8 100644
14604 --- a/drivers/usb/host/xhci-ring.c
14605 +++ b/drivers/usb/host/xhci-ring.c
14606 @@ -2028,7 +2028,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
14607  
14608  	if (!(status & STS_EINT)) {
14609  		spin_unlock(&xhci->lock);
14610 -		xhci_warn(xhci, "Spurious interrupt.\n");
14611  		return IRQ_NONE;
14612  	}
14613  	xhci_dbg(xhci, "op reg status = %08x\n", status);
14614 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
14615 index d5c550e..96ef552 100644
14616 --- a/drivers/usb/host/xhci.c
14617 +++ b/drivers/usb/host/xhci.c
14618 @@ -508,9 +508,10 @@ void xhci_stop(struct usb_hcd *hcd)
14619  	spin_lock_irq(&xhci->lock);
14620  	xhci_halt(xhci);
14621  	xhci_reset(xhci);
14622 -	xhci_cleanup_msix(xhci);
14623  	spin_unlock_irq(&xhci->lock);
14624  
14625 +	xhci_cleanup_msix(xhci);
14626 +
14627  #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
14628  	/* Tell the event ring poll function not to reschedule */
14629  	xhci->zombie = 1;
14630 @@ -544,9 +545,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
14631  
14632  	spin_lock_irq(&xhci->lock);
14633  	xhci_halt(xhci);
14634 -	xhci_cleanup_msix(xhci);
14635  	spin_unlock_irq(&xhci->lock);
14636  
14637 +	xhci_cleanup_msix(xhci);
14638 +
14639  	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
14640  		    xhci_readl(xhci, &xhci->op_regs->status));
14641  }
14642 @@ -1284,6 +1286,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
14643  		cmd_completion = command->completion;
14644  		cmd_status = &command->status;
14645  		command->command_trb = xhci->cmd_ring->enqueue;
14646 +
14647 +		/* Enqueue pointer can be left pointing to the link TRB,
14648 +		 * we must handle that
14649 +		 */
14650 +		if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
14651 +				== TRB_TYPE(TRB_LINK))
14652 +			command->command_trb =
14653 +				xhci->cmd_ring->enq_seg->next->trbs;
14654 +
14655  		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
14656  	} else {
14657  		in_ctx = virt_dev->in_ctx;
14658 @@ -1993,6 +2004,15 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
14659  	/* Attempt to submit the Reset Device command to the command ring */
14660  	spin_lock_irqsave(&xhci->lock, flags);
14661  	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
14662 +
14663 +	/* Enqueue pointer can be left pointing to the link TRB,
14664 +	 * we must handle that
14665 +	 */
14666 +	if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK)
14667 +			== TRB_TYPE(TRB_LINK))
14668 +		reset_device_cmd->command_trb =
14669 +			xhci->cmd_ring->enq_seg->next->trbs;
14670 +
14671  	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
14672  	ret = xhci_queue_reset_device(xhci, slot_id);
14673  	if (ret) {
14674 @@ -2148,8 +2168,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
14675  		xhci_err(xhci, "Error while assigning device slot ID\n");
14676  		return 0;
14677  	}
14678 -	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
14679 -	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
14680 +	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
14681 +	 * Use GFP_NOIO, since this function can be called from
14682 +	 * xhci_discover_or_reset_device(), which may be called as part of
14683 +	 * mass storage driver error handling.
14684 +	 */
14685 +	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
14686  		/* Disable slot, if we can do it without mem alloc */
14687  		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
14688  		spin_lock_irqsave(&xhci->lock, flags);
14689 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
14690 index 34a60d9..404ecbc 100644
14691 --- a/drivers/usb/host/xhci.h
14692 +++ b/drivers/usb/host/xhci.h
14693 @@ -448,6 +448,24 @@ struct xhci_doorbell_array {
14694  
14695  
14696  /**
14697 + * struct xhci_protocol_caps
14698 + * @revision:		major revision, minor revision, capability ID,
14699 + *			and next capability pointer.
14700 + * @name_string:	Four ASCII characters to say which spec this xHC
14701 + *			follows, typically "USB ".
14702 + * @port_info:		Port offset, count, and protocol-defined information.
14703 + */
14704 +struct xhci_protocol_caps {
14705 +	u32	revision;
14706 +	u32	name_string;
14707 +	u32	port_info;
14708 +};
14709 +
14710 +#define	XHCI_EXT_PORT_MAJOR(x)	(((x) >> 24) & 0xff)
14711 +#define	XHCI_EXT_PORT_OFF(x)	((x) & 0xff)
14712 +#define	XHCI_EXT_PORT_COUNT(x)	(((x) >> 8) & 0xff)
14713 +
14714 +/**
14715   * struct xhci_container_ctx
14716   * @type: Type of context.  Used to calculated offsets to contained contexts.
14717   * @size: Size of the context data
14718 @@ -614,6 +632,11 @@ struct xhci_ep_ctx {
14719  #define MAX_PACKET_MASK		(0xffff << 16)
14720  #define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
14721  
14722 +/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
14723 + * USB2.0 spec 9.6.6.
14724 + */
14725 +#define GET_MAX_PACKET(p)	((p) & 0x7ff)
14726 +
14727  /* tx_info bitmasks */
14728  #define AVG_TRB_LENGTH_FOR_EP(p)	((p) & 0xffff)
14729  #define MAX_ESIT_PAYLOAD_FOR_EP(p)	(((p) & 0xffff) << 16)
14730 @@ -1199,6 +1222,15 @@ struct xhci_hcd {
14731  #define	XHCI_LINK_TRB_QUIRK	(1 << 0)
14732  #define XHCI_RESET_EP_QUIRK	(1 << 1)
14733  #define XHCI_NEC_HOST		(1 << 2)
14734 +
14735 +	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
14736 +	u8			*port_array;
14737 +	/* Array of pointers to USB 3.0 PORTSC registers */
14738 +	u32 __iomem		**usb3_ports;
14739 +	unsigned int		num_usb3_ports;
14740 +	/* Array of pointers to USB 2.0 PORTSC registers */
14741 +	u32 __iomem		**usb2_ports;
14742 +	unsigned int		num_usb2_ports;
14743  };
14744  
14745  /* For testing purposes */
14746 diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
14747 index 2f43c57..9251773 100644
14748 --- a/drivers/usb/misc/cypress_cy7c63.c
14749 +++ b/drivers/usb/misc/cypress_cy7c63.c
14750 @@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev,
14751  	return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
14752  }
14753  
14754 -static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO,
14755 -		   get_port0_handler, set_port0_handler);
14756 +static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
14757  
14758 -static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO,
14759 -		   get_port1_handler, set_port1_handler);
14760 +static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
14761  
14762  
14763  static int cypress_probe(struct usb_interface *interface,
14764 diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
14765 index bc88c79..0db05b2 100644
14766 --- a/drivers/usb/misc/iowarrior.c
14767 +++ b/drivers/usb/misc/iowarrior.c
14768 @@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
14769  			/* needed for power consumption */
14770  			struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
14771  
14772 +			memset(&info, 0, sizeof(info));
14773  			/* directly from the descriptor */
14774  			info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
14775  			info.product = dev->product_id;
14776 diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
14777 index 70d00e9..dd573ab 100644
14778 --- a/drivers/usb/misc/sisusbvga/sisusb.c
14779 +++ b/drivers/usb/misc/sisusbvga/sisusb.c
14780 @@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
14781  #else
14782  			x.sisusb_conactive  = 0;
14783  #endif
14784 +			memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
14785  
14786  			if (copy_to_user((void __user *)arg, &x, sizeof(x)))
14787  				retval = -EFAULT;
14788 diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
14789 index d77aba4..f63776a 100644
14790 --- a/drivers/usb/misc/trancevibrator.c
14791 +++ b/drivers/usb/misc/trancevibrator.c
14792 @@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
14793  	return count;
14794  }
14795  
14796 -static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed);
14797 +static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
14798  
14799  static int tv_probe(struct usb_interface *interface,
14800  		    const struct usb_device_id *id)
14801 diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
14802 index 63da2c3..c96f51d 100644
14803 --- a/drivers/usb/misc/usbled.c
14804 +++ b/drivers/usb/misc/usbled.c
14805 @@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co
14806  	change_color(led);						\
14807  	return count;							\
14808  }									\
14809 -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
14810 +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value);
14811  show_set(blue);
14812  show_set(red);
14813  show_set(green);
14814 diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
14815 index de8ef94..417b8f2 100644
14816 --- a/drivers/usb/misc/usbsevseg.c
14817 +++ b/drivers/usb/misc/usbsevseg.c
14818 @@ -192,7 +192,7 @@ static ssize_t set_attr_##name(struct device *dev, 		\
14819  								\
14820  	return count;						\
14821  }								\
14822 -static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
14823 +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name);
14824  
14825  static ssize_t show_attr_text(struct device *dev,
14826  	struct device_attribute *attr, char *buf)
14827 @@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev,
14828  	return count;
14829  }
14830  
14831 -static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
14832 +static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text);
14833  
14834  static ssize_t show_attr_decimals(struct device *dev,
14835  	struct device_attribute *attr, char *buf)
14836 @@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev,
14837  	return count;
14838  }
14839  
14840 -static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
14841 -	show_attr_decimals, set_attr_decimals);
14842 +static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals);
14843  
14844  static ssize_t show_attr_textmode(struct device *dev,
14845  	struct device_attribute *attr, char *buf)
14846 @@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev,
14847  	return -EINVAL;
14848  }
14849  
14850 -static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
14851 -	show_attr_textmode, set_attr_textmode);
14852 +static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
14853  
14854  
14855  MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
14856 diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
14857 index 796e2f6..4ff2158 100644
14858 --- a/drivers/usb/misc/uss720.c
14859 +++ b/drivers/usb/misc/uss720.c
14860 @@ -3,7 +3,7 @@
14861  /*
14862   *	uss720.c  --  USS720 USB Parport Cable.
14863   *
14864 - *	Copyright (C) 1999, 2005
14865 + *	Copyright (C) 1999, 2005, 2010
14866   *	    Thomas Sailer (t.sailer@alumni.ethz.ch)
14867   *
14868   *	This program is free software; you can redistribute it and/or modify
14869 @@ -776,6 +776,8 @@ static const struct usb_device_id uss720_table[] = {
14870  	{ USB_DEVICE(0x0557, 0x2001) },
14871  	{ USB_DEVICE(0x0729, 0x1284) },
14872  	{ USB_DEVICE(0x1293, 0x0002) },
14873 +	{ USB_DEVICE(0x1293, 0x0002) },
14874 +	{ USB_DEVICE(0x050d, 0x0002) },
14875  	{ }						/* Terminating entry */
14876  };
14877  
14878 diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
14879 index b611420..611a9d2 100644
14880 --- a/drivers/usb/musb/blackfin.c
14881 +++ b/drivers/usb/musb/blackfin.c
14882 @@ -342,8 +342,10 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
14883  
14884  	usb_nop_xceiv_register();
14885  	musb->xceiv = otg_get_transceiver();
14886 -	if (!musb->xceiv)
14887 +	if (!musb->xceiv) {
14888 +		gpio_free(musb->config->gpio_vrsel);
14889  		return -ENODEV;
14890 +	}
14891  
14892  	if (ANOMALY_05000346) {
14893  		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
14894 @@ -394,8 +396,9 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
14895  
14896  int musb_platform_exit(struct musb *musb)
14897  {
14898 -
14899  	gpio_free(musb->config->gpio_vrsel);
14900  
14901 +	otg_put_transceiver(musb->xceiv);
14902 +	usb_nop_xceiv_unregister();
14903  	return 0;
14904  }
14905 diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
14906 index 57624361..6e67629 100644
14907 --- a/drivers/usb/musb/davinci.c
14908 +++ b/drivers/usb/musb/davinci.c
14909 @@ -446,6 +446,7 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
14910  fail:
14911  	clk_disable(musb->clock);
14912  
14913 +	otg_put_transceiver(musb->xceiv);
14914  	usb_nop_xceiv_unregister();
14915  	return -ENODEV;
14916  }
14917 @@ -496,6 +497,7 @@ int musb_platform_exit(struct musb *musb)
14918  
14919  	clk_disable(musb->clock);
14920  
14921 +	otg_put_transceiver(musb->xceiv);
14922  	usb_nop_xceiv_unregister();
14923  
14924  	return 0;
14925 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
14926 index 540c766..0707b29 100644
14927 --- a/drivers/usb/musb/musb_core.c
14928 +++ b/drivers/usb/musb/musb_core.c
14929 @@ -1921,10 +1921,6 @@ static void musb_free(struct musb *musb)
14930  		dma_controller_destroy(c);
14931  	}
14932  
14933 -#ifdef CONFIG_USB_MUSB_OTG
14934 -	put_device(musb->xceiv->dev);
14935 -#endif
14936 -
14937  #ifdef CONFIG_USB_MUSB_HDRC_HCD
14938  	usb_put_hcd(musb_to_hcd(musb));
14939  #else
14940 @@ -2247,7 +2243,6 @@ static int __exit musb_remove(struct platform_device *pdev)
14941  #endif
14942  	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
14943  	musb_platform_exit(musb);
14944 -	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
14945  
14946  	musb_free(musb);
14947  	iounmap(ctrl_base);
14948 diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
14949 index 2111a24..ed618bd 100644
14950 --- a/drivers/usb/musb/omap2430.c
14951 +++ b/drivers/usb/musb/omap2430.c
14952 @@ -320,5 +320,6 @@ int musb_platform_exit(struct musb *musb)
14953  
14954  	musb_platform_suspend(musb);
14955  
14956 +	otg_put_transceiver(musb->xceiv);
14957  	return 0;
14958  }
14959 diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
14960 index 3c48e77..bde40ef 100644
14961 --- a/drivers/usb/musb/tusb6010.c
14962 +++ b/drivers/usb/musb/tusb6010.c
14963 @@ -1152,6 +1152,8 @@ done:
14964  	if (ret < 0) {
14965  		if (sync)
14966  			iounmap(sync);
14967 +
14968 +		otg_put_transceiver(musb->xceiv);
14969  		usb_nop_xceiv_unregister();
14970  	}
14971  	return ret;
14972 @@ -1166,6 +1168,8 @@ int musb_platform_exit(struct musb *musb)
14973  		musb->board_set_power(0);
14974  
14975  	iounmap(musb->sync_va);
14976 +
14977 +	otg_put_transceiver(musb->xceiv);
14978  	usb_nop_xceiv_unregister();
14979  	return 0;
14980  }
14981 diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
14982 index 63f7cc4..7b8815d 100644
14983 --- a/drivers/usb/serial/ch341.c
14984 +++ b/drivers/usb/serial/ch341.c
14985 @@ -486,12 +486,22 @@ static void ch341_read_int_callback(struct urb *urb)
14986  	if (actual_length >= 4) {
14987  		struct ch341_private *priv = usb_get_serial_port_data(port);
14988  		unsigned long flags;
14989 +		u8 prev_line_status = priv->line_status;
14990  
14991  		spin_lock_irqsave(&priv->lock, flags);
14992  		priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
14993  		if ((data[1] & CH341_MULT_STAT))
14994  			priv->multi_status_change = 1;
14995  		spin_unlock_irqrestore(&priv->lock, flags);
14996 +
14997 +		if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
14998 +			struct tty_struct *tty = tty_port_tty_get(&port->port);
14999 +			if (tty)
15000 +				usb_serial_handle_dcd_change(port, tty,
15001 +					    priv->line_status & CH341_BIT_DCD);
15002 +			tty_kref_put(tty);
15003 +		}
15004 +
15005  		wake_up_interruptible(&priv->delta_msr_wait);
15006  	}
15007  
15008 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
15009 index 4f1744c..735ea03 100644
15010 --- a/drivers/usb/serial/cp210x.c
15011 +++ b/drivers/usb/serial/cp210x.c
15012 @@ -49,11 +49,11 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
15013  static void cp210x_break_ctl(struct tty_struct *, int);
15014  static int cp210x_startup(struct usb_serial *);
15015  static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
15016 -static int cp210x_carrier_raised(struct usb_serial_port *p);
15017  
15018  static int debug;
15019  
15020  static const struct usb_device_id id_table[] = {
15021 +	{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
15022  	{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
15023  	{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
15024  	{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
15025 @@ -86,7 +86,6 @@ static const struct usb_device_id id_table[] = {
15026  	{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
15027  	{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
15028  	{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
15029 -	{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
15030  	{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
15031  	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
15032  	{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
15033 @@ -109,7 +108,9 @@ static const struct usb_device_id id_table[] = {
15034  	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
15035  	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
15036  	{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
15037 +	{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
15038  	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
15039 +	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
15040  	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
15041  	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
15042  	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
15043 @@ -132,6 +133,7 @@ static const struct usb_device_id id_table[] = {
15044  	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
15045  	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
15046  	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
15047 +	{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
15048  	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
15049  	{ } /* Terminating Entry */
15050  };
15051 @@ -163,8 +165,7 @@ static struct usb_serial_driver cp210x_device = {
15052  	.tiocmget 		= cp210x_tiocmget,
15053  	.tiocmset		= cp210x_tiocmset,
15054  	.attach			= cp210x_startup,
15055 -	.dtr_rts		= cp210x_dtr_rts,
15056 -	.carrier_raised		= cp210x_carrier_raised
15057 +	.dtr_rts		= cp210x_dtr_rts
15058  };
15059  
15060  /* Config request types */
15061 @@ -763,15 +764,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
15062  	return result;
15063  }
15064  
15065 -static int cp210x_carrier_raised(struct usb_serial_port *p)
15066 -{
15067 -	unsigned int control;
15068 -	cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
15069 -	if (control & CONTROL_DCD)
15070 -		return 1;
15071 -	return 0;
15072 -}
15073 -
15074  static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
15075  {
15076  	struct usb_serial_port *port = tty->driver_data;
15077 diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
15078 index b92070c..666e5a6 100644
15079 --- a/drivers/usb/serial/digi_acceleport.c
15080 +++ b/drivers/usb/serial/digi_acceleport.c
15081 @@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty);
15082  static int digi_chars_in_buffer(struct tty_struct *tty);
15083  static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
15084  static void digi_close(struct usb_serial_port *port);
15085 -static int digi_carrier_raised(struct usb_serial_port *port);
15086  static void digi_dtr_rts(struct usb_serial_port *port, int on);
15087  static int digi_startup_device(struct usb_serial *serial);
15088  static int digi_startup(struct usb_serial *serial);
15089 @@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
15090  	.open =				digi_open,
15091  	.close =			digi_close,
15092  	.dtr_rts =			digi_dtr_rts,
15093 -	.carrier_raised =		digi_carrier_raised,
15094  	.write =			digi_write,
15095  	.write_room =			digi_write_room,
15096  	.write_bulk_callback = 		digi_write_bulk_callback,
15097 @@ -1339,14 +1337,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on)
15098  	digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
15099  }
15100  
15101 -static int digi_carrier_raised(struct usb_serial_port *port)
15102 -{
15103 -	struct digi_port *priv = usb_get_serial_port_data(port);
15104 -	if (priv->dp_modem_signals & TIOCM_CD)
15105 -		return 1;
15106 -	return 0;
15107 -}
15108 -
15109  static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
15110  {
15111  	int ret;
15112 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
15113 index 97cc87d..88bef02 100644
15114 --- a/drivers/usb/serial/ftdi_sio.c
15115 +++ b/drivers/usb/serial/ftdi_sio.c
15116 @@ -99,6 +99,7 @@ struct ftdi_sio_quirk {
15117  static int   ftdi_jtag_probe(struct usb_serial *serial);
15118  static int   ftdi_mtxorb_hack_setup(struct usb_serial *serial);
15119  static int   ftdi_NDI_device_setup(struct usb_serial *serial);
15120 +static int   ftdi_stmclite_probe(struct usb_serial *serial);
15121  static void  ftdi_USB_UIRT_setup(struct ftdi_private *priv);
15122  static void  ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
15123  
15124 @@ -122,6 +123,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
15125  	.port_probe = ftdi_HE_TIRA1_setup,
15126  };
15127  
15128 +static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
15129 +	.probe	= ftdi_stmclite_probe,
15130 +};
15131 +
15132  /*
15133   * The 8U232AM has the same API as the sio except for:
15134   * - it can support MUCH higher baudrates; up to:
15135 @@ -177,6 +182,7 @@ static struct usb_device_id id_table_combined [] = {
15136  	{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
15137  	{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
15138  	{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
15139 +	{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
15140  	{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
15141  	{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
15142  	{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
15143 @@ -200,6 +206,7 @@ static struct usb_device_id id_table_combined [] = {
15144  	{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
15145  	{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
15146  	{ USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
15147 +	{ USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
15148  	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
15149  	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
15150  	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
15151 @@ -613,6 +620,7 @@ static struct usb_device_id id_table_combined [] = {
15152  	{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
15153  	{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
15154  	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
15155 +	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
15156  	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
15157  	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
15158  	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
15159 @@ -673,8 +681,17 @@ static struct usb_device_id id_table_combined [] = {
15160  	{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
15161  	{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
15162  	{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
15163 -	{ USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
15164 -	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
15165 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
15166 +	{ USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
15167 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
15168 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
15169 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
15170 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
15171 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
15172 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
15173 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
15174 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
15175 +	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
15176  	{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
15177  	{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
15178  	{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
15179 @@ -696,6 +713,7 @@ static struct usb_device_id id_table_combined [] = {
15180  		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
15181  	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
15182  	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
15183 +	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
15184  	{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
15185  	{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
15186  	{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
15187 @@ -715,8 +733,37 @@ static struct usb_device_id id_table_combined [] = {
15188  		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
15189  	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
15190  	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
15191 +
15192 +	/* Papouch devices based on FTDI chip */
15193 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) },
15194 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) },
15195 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) },
15196 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) },
15197 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) },
15198 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) },
15199 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) },
15200 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) },
15201 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) },
15202 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) },
15203 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
15204 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) },
15205 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) },
15206 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) },
15207  	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
15208 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) },
15209 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) },
15210 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) },
15211 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) },
15212 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) },
15213 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) },
15214 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) },
15215 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) },
15216 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) },
15217 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) },
15218  	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
15219 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) },
15220 +	{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) },
15221 +
15222  	{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
15223  	{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
15224  	{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
15225 @@ -751,6 +798,7 @@ static struct usb_device_id id_table_combined [] = {
15226  	{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
15227  		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
15228  	{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
15229 +	{ USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) },
15230  	{ USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
15231  		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
15232  	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
15233 @@ -761,6 +809,14 @@ static struct usb_device_id id_table_combined [] = {
15234  	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
15235  	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
15236  	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
15237 +	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
15238 +	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
15239 +	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
15240 +	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
15241 +	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
15242 +		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
15243 +	{ USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
15244 +		.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
15245  	{ },					/* Optional parameter entry */
15246  	{ }					/* Terminating entry */
15247  };
15248 @@ -1642,6 +1698,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
15249  }
15250  
15251  /*
15252 + * First and second port on STMCLiteadaptors is reserved for JTAG interface
15253 + * and the forth port for pio
15254 + */
15255 +static int ftdi_stmclite_probe(struct usb_serial *serial)
15256 +{
15257 +	struct usb_device *udev = serial->dev;
15258 +	struct usb_interface *interface = serial->interface;
15259 +
15260 +	dbg("%s", __func__);
15261 +
15262 +	if (interface == udev->actconfig->interface[2])
15263 +		return 0;
15264 +
15265 +	dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
15266 +
15267 +	return -ENODEV;
15268 +}
15269 +
15270 +/*
15271   * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
15272   * We have to correct it if we want to read from it.
15273   */
15274 @@ -2028,8 +2103,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
15275  				"urb failed to set to rts/cts flow control\n");
15276  		}
15277  
15278 -		/* raise DTR/RTS */
15279 -		set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
15280  	} else {
15281  		/*
15282  		 * Xon/Xoff code
15283 @@ -2077,8 +2150,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
15284  			}
15285  		}
15286  
15287 -		/* lower DTR/RTS */
15288 -		clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
15289  	}
15290  	return;
15291  }
15292 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
15293 index 15a4583..3523df5 100644
15294 --- a/drivers/usb/serial/ftdi_sio_ids.h
15295 +++ b/drivers/usb/serial/ftdi_sio_ids.h
15296 @@ -61,6 +61,7 @@
15297  #define FTDI_OPENDCC_SNIFFER_PID	0xBFD9
15298  #define FTDI_OPENDCC_THROTTLE_PID	0xBFDA
15299  #define FTDI_OPENDCC_GATEWAY_PID	0xBFDB
15300 +#define FTDI_OPENDCC_GBM_PID	0xBFDC
15301  
15302  /*
15303   * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
15304 @@ -113,6 +114,9 @@
15305  /* Lenz LI-USB Computer Interface. */
15306  #define FTDI_LENZ_LIUSB_PID	0xD780
15307  
15308 +/* Vardaan Enterprises Serial Interface VEUSB422R3 */
15309 +#define FTDI_VARDAAN_PID	0xF070
15310 +
15311  /*
15312   * Xsens Technologies BV products (http://www.xsens.com).
15313   */
15314 @@ -514,6 +518,12 @@
15315  #define RATOC_PRODUCT_ID_USB60F	0xb020
15316  
15317  /*
15318 + * Acton Research Corp.
15319 + */
15320 +#define ACTON_VID		0x0647	/* Vendor ID */
15321 +#define ACTON_SPECTRAPRO_PID	0x0100
15322 +
15323 +/*
15324   * Contec products (http://www.contec.com)
15325   * Submitted by Daniel Sangorrin
15326   */
15327 @@ -565,11 +575,23 @@
15328  #define OCT_US101_PID		0x0421	/* OCT US101 USB to RS-232 */
15329  
15330  /*
15331 - * Icom ID-1 digital transceiver
15332 + * Definitions for Icom Inc. devices
15333   */
15334 -
15335 -#define ICOM_ID1_VID            0x0C26
15336 -#define ICOM_ID1_PID            0x0004
15337 +#define ICOM_VID		0x0C26 /* Icom vendor ID */
15338 +/* Note: ID-1 is a communications tranceiver for HAM-radio operators */
15339 +#define ICOM_ID_1_PID		0x0004 /* ID-1 USB to RS-232 */
15340 +/* Note: OPC is an Optional cable to connect an Icom Tranceiver */
15341 +#define ICOM_OPC_U_UC_PID	0x0018 /* OPC-478UC, OPC-1122U cloning cable */
15342 +/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
15343 +#define ICOM_ID_RP2C1_PID	0x0009 /* ID-RP2C Asset 1 to RS-232 */
15344 +#define ICOM_ID_RP2C2_PID	0x000A /* ID-RP2C Asset 2 to RS-232 */
15345 +#define ICOM_ID_RP2D_PID	0x000B /* ID-RP2D configuration port*/
15346 +#define ICOM_ID_RP2VT_PID	0x000C /* ID-RP2V Transmit config port */
15347 +#define ICOM_ID_RP2VR_PID	0x000D /* ID-RP2V Receive config port */
15348 +#define ICOM_ID_RP4KVT_PID	0x0010 /* ID-RP4000V Transmit config port */
15349 +#define ICOM_ID_RP4KVR_PID	0x0011 /* ID-RP4000V Receive config port */
15350 +#define ICOM_ID_RP2KVT_PID	0x0012 /* ID-RP2000V Transmit config port */
15351 +#define ICOM_ID_RP2KVR_PID	0x0013 /* ID-RP2000V Receive config port */
15352  
15353  /*
15354   * GN Otometrics (http://www.otometrics.com)
15355 @@ -720,6 +742,7 @@
15356   */
15357  #define RTSYSTEMS_VID			0x2100	/* Vendor ID */
15358  #define RTSYSTEMS_SERIAL_VX7_PID	0x9e52	/* Serial converter for VX-7 Radios using FT232RL */
15359 +#define RTSYSTEMS_CT29B_PID		0x9e54	/* CT29B Radio Cable */
15360  
15361  /*
15362   * Bayer Ascensia Contour blood glucose meter USB-converter cable.
15363 @@ -1017,14 +1040,45 @@
15364  #define WHT_PID			0x0004 /* Wireless Handheld Terminal */
15365  
15366  /*
15367 + * STMicroelectonics
15368 + */
15369 +#define ST_VID			0x0483
15370 +#define ST_STMCLT1030_PID	0x3747 /* ST Micro Connect Lite STMCLT1030 */
15371 +
15372 +/*
15373   * Papouch products (http://www.papouch.com/)
15374   * Submitted by Folkert van Heusden
15375   */
15376  
15377  #define PAPOUCH_VID			0x5050	/* Vendor ID */
15378 +#define PAPOUCH_SB485_PID		0x0100	/* Papouch SB485 USB-485/422 Converter */
15379 +#define PAPOUCH_AP485_PID		0x0101	/* AP485 USB-RS485 Converter */
15380 +#define PAPOUCH_SB422_PID		0x0102	/* Papouch SB422 USB-RS422 Converter  */
15381 +#define PAPOUCH_SB485_2_PID		0x0103	/* Papouch SB485 USB-485/422 Converter */
15382 +#define PAPOUCH_AP485_2_PID		0x0104	/* AP485 USB-RS485 Converter */
15383 +#define PAPOUCH_SB422_2_PID		0x0105	/* Papouch SB422 USB-RS422 Converter  */
15384 +#define PAPOUCH_SB485S_PID		0x0106	/* Papouch SB485S USB-485/422 Converter */
15385 +#define PAPOUCH_SB485C_PID		0x0107	/* Papouch SB485C USB-485/422 Converter */
15386 +#define PAPOUCH_LEC_PID			0x0300	/* LEC USB Converter */
15387 +#define PAPOUCH_SB232_PID		0x0301	/* Papouch SB232 USB-RS232 Converter */
15388  #define PAPOUCH_TMU_PID			0x0400	/* TMU USB Thermometer */
15389 -#define PAPOUCH_QUIDO4x4_PID		0x0900	/* Quido 4/4 Module */
15390 +#define PAPOUCH_IRAMP_PID		0x0500	/* Papouch IRAmp Duplex */
15391 +#define PAPOUCH_DRAK5_PID		0x0700	/* Papouch DRAK5 */
15392 +#define PAPOUCH_QUIDO8x8_PID		0x0800	/* Papouch Quido 8/8 Module */
15393 +#define PAPOUCH_QUIDO4x4_PID		0x0900	/* Papouch Quido 4/4 Module */
15394 +#define PAPOUCH_QUIDO2x2_PID		0x0a00	/* Papouch Quido 2/2 Module */
15395 +#define PAPOUCH_QUIDO10x1_PID		0x0b00	/* Papouch Quido 10/1 Module */
15396 +#define PAPOUCH_QUIDO30x3_PID		0x0c00	/* Papouch Quido 30/3 Module */
15397 +#define PAPOUCH_QUIDO60x3_PID		0x0d00	/* Papouch Quido 60(100)/3 Module */
15398 +#define PAPOUCH_QUIDO2x16_PID		0x0e00	/* Papouch Quido 2/16 Module */
15399 +#define PAPOUCH_QUIDO3x32_PID		0x0f00	/* Papouch Quido 3/32 Module */
15400 +#define PAPOUCH_DRAK6_PID		0x1000	/* Papouch DRAK6 */
15401 +#define PAPOUCH_UPSUSB_PID		0x8000	/* Papouch UPS-USB adapter */
15402 +#define PAPOUCH_MU_PID			0x8001	/* MU controller */
15403 +#define PAPOUCH_SIMUKEY_PID		0x8002	/* Papouch SimuKey */
15404  #define PAPOUCH_AD4USB_PID		0x8003	/* AD4USB Measurement Module */
15405 +#define PAPOUCH_GMUX_PID		0x8004	/* Papouch GOLIATH MUX */
15406 +#define PAPOUCH_GMSR_PID		0x8005	/* Papouch GOLIATH MSR */
15407  
15408  /*
15409   * Marvell SheevaPlug
15410 @@ -1051,6 +1105,11 @@
15411  #define MJSG_HD_RADIO_PID	0x937C
15412  
15413  /*
15414 + * D.O.Tec products (http://www.directout.eu)
15415 + */
15416 +#define FTDI_DOTEC_PID 0x9868
15417 +
15418 +/*
15419   * Xverve Signalyzer tools (http://www.signalyzer.com/)
15420   */
15421  #define XVERVE_SIGNALYZER_ST_PID	0xBCA0
15422 @@ -1063,3 +1122,21 @@
15423   * Submitted by John G. Rogers
15424   */
15425  #define SEGWAY_RMP200_PID	0xe729
15426 +
15427 +
15428 +/*
15429 + * Accesio USB Data Acquisition products (http://www.accesio.com/)
15430 + */
15431 +#define ACCESIO_COM4SM_PID 	0xD578
15432 +
15433 +/* www.sciencescope.co.uk educational dataloggers */
15434 +#define FTDI_SCIENCESCOPE_LOGBOOKML_PID		0xFF18
15435 +#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID	0xFF1C
15436 +#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID	0xFF1D
15437 +
15438 +/*
15439 + * Milkymist One JTAG/Serial
15440 + */
15441 +#define QIHARDWARE_VID			0x20B7
15442 +#define MILKYMISTONE_JTAGSERIAL_PID	0x0713
15443 +
15444 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
15445 index e6833e2..e4db5ad 100644
15446 --- a/drivers/usb/serial/generic.c
15447 +++ b/drivers/usb/serial/generic.c
15448 @@ -479,6 +479,26 @@ int usb_serial_handle_break(struct usb_serial_port *port)
15449  }
15450  EXPORT_SYMBOL_GPL(usb_serial_handle_break);
15451  
15452 +/**
15453 + *	usb_serial_handle_dcd_change - handle a change of carrier detect state
15454 + *	@port: usb_serial_port structure for the open port
15455 + *	@tty: tty_struct structure for the port
15456 + *	@status: new carrier detect status, nonzero if active
15457 + */
15458 +void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
15459 +				struct tty_struct *tty, unsigned int status)
15460 +{
15461 +	struct tty_port *port = &usb_port->port;
15462 +
15463 +	dbg("%s - port %d, status %d", __func__, usb_port->number, status);
15464 +
15465 +	if (status)
15466 +		wake_up_interruptible(&port->open_wait);
15467 +	else if (tty && !C_CLOCAL(tty))
15468 +		tty_hangup(tty);
15469 +}
15470 +EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
15471 +
15472  int usb_serial_generic_resume(struct usb_serial *serial)
15473  {
15474  	struct usb_serial_port *port;
15475 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
15476 index 76e6fb3..db0e3fe 100644
15477 --- a/drivers/usb/serial/io_edgeport.c
15478 +++ b/drivers/usb/serial/io_edgeport.c
15479 @@ -2894,8 +2894,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
15480  
15481  	dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
15482  
15483 -	edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
15484 -	edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
15485 +	edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
15486 +	edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
15487  	edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
15488  
15489  	for (rec = ihex_next_binrec(rec); rec;
15490 diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
15491 index feb56a4..1021a2c 100644
15492 --- a/drivers/usb/serial/io_tables.h
15493 +++ b/drivers/usb/serial/io_tables.h
15494 @@ -196,6 +196,7 @@ static struct usb_serial_driver epic_device = {
15495  		.name		= "epic",
15496  	},
15497  	.description		= "EPiC device",
15498 +	.usb_driver		= &io_driver,
15499  	.id_table		= Epic_port_id_table,
15500  	.num_ports		= 1,
15501  	.open			= edge_open,
15502 diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
15503 index efc7211..7d26e35 100644
15504 --- a/drivers/usb/serial/iuu_phoenix.c
15505 +++ b/drivers/usb/serial/iuu_phoenix.c
15506 @@ -1276,6 +1276,7 @@ static struct usb_serial_driver iuu_device = {
15507  		   .name = "iuu_phoenix",
15508  		   },
15509  	.id_table = id_table,
15510 +	.usb_driver = &iuu_driver,
15511  	.num_ports = 1,
15512  	.bulk_in_size = 512,
15513  	.bulk_out_size = 512,
15514 diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
15515 index bf3297d..1ab6ea8 100644
15516 --- a/drivers/usb/serial/keyspan.h
15517 +++ b/drivers/usb/serial/keyspan.h
15518 @@ -546,6 +546,7 @@ static struct usb_serial_driver keyspan_pre_device = {
15519  		.name		= "keyspan_no_firm",
15520  	},
15521  	.description		= "Keyspan - (without firmware)",
15522 +	.usb_driver		= &keyspan_driver,
15523  	.id_table		= keyspan_pre_ids,
15524  	.num_ports		= 1,
15525  	.attach			= keyspan_fake_startup,
15526 @@ -557,6 +558,7 @@ static struct usb_serial_driver keyspan_1port_device = {
15527  		.name		= "keyspan_1",
15528  	},
15529  	.description		= "Keyspan 1 port adapter",
15530 +	.usb_driver		= &keyspan_driver,
15531  	.id_table		= keyspan_1port_ids,
15532  	.num_ports		= 1,
15533  	.open			= keyspan_open,
15534 @@ -579,6 +581,7 @@ static struct usb_serial_driver keyspan_2port_device = {
15535  		.name		= "keyspan_2",
15536  	},
15537  	.description		= "Keyspan 2 port adapter",
15538 +	.usb_driver		= &keyspan_driver,
15539  	.id_table		= keyspan_2port_ids,
15540  	.num_ports		= 2,
15541  	.open			= keyspan_open,
15542 @@ -601,6 +604,7 @@ static struct usb_serial_driver keyspan_4port_device = {
15543  		.name		= "keyspan_4",
15544  	},
15545  	.description		= "Keyspan 4 port adapter",
15546 +	.usb_driver		= &keyspan_driver,
15547  	.id_table		= keyspan_4port_ids,
15548  	.num_ports		= 4,
15549  	.open			= keyspan_open,
15550 diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
15551 index 185fe9a..2cbd661 100644
15552 --- a/drivers/usb/serial/keyspan_pda.c
15553 +++ b/drivers/usb/serial/keyspan_pda.c
15554 @@ -680,22 +680,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
15555  	}
15556  }
15557  
15558 -static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
15559 -{
15560 -	struct usb_serial *serial = port->serial;
15561 -	unsigned char modembits;
15562 -
15563 -	/* If we can read the modem status and the DCD is low then
15564 -	   carrier is not raised yet */
15565 -	if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
15566 -		if (!(modembits & (1>>6)))
15567 -			return 0;
15568 -	}
15569 -	/* Carrier raised, or we failed (eg disconnected) so
15570 -	   progress accordingly */
15571 -	return 1;
15572 -}
15573 -
15574  
15575  static int keyspan_pda_open(struct tty_struct *tty,
15576  					struct usb_serial_port *port)
15577 @@ -882,7 +866,6 @@ static struct usb_serial_driver keyspan_pda_device = {
15578  	.id_table =		id_table_std,
15579  	.num_ports =		1,
15580  	.dtr_rts =		keyspan_pda_dtr_rts,
15581 -	.carrier_raised	=	keyspan_pda_carrier_raised,
15582  	.open =			keyspan_pda_open,
15583  	.close =		keyspan_pda_close,
15584  	.write =		keyspan_pda_write,
15585 diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
15586 index 7aa01b9..2849f8c 100644
15587 --- a/drivers/usb/serial/mct_u232.c
15588 +++ b/drivers/usb/serial/mct_u232.c
15589 @@ -549,9 +549,12 @@ static void mct_u232_close(struct usb_serial_port *port)
15590  {
15591  	dbg("%s port %d", __func__, port->number);
15592  
15593 -	usb_serial_generic_close(port);
15594 -	if (port->serial->dev)
15595 +	if (port->serial->dev) {
15596 +		/* shutdown our urbs */
15597 +		usb_kill_urb(port->write_urb);
15598 +		usb_kill_urb(port->read_urb);
15599  		usb_kill_urb(port->interrupt_in_urb);
15600 +	}
15601  } /* mct_u232_close */
15602  
15603  
15604 diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
15605 index cf17183..653465f 100644
15606 --- a/drivers/usb/serial/moto_modem.c
15607 +++ b/drivers/usb/serial/moto_modem.c
15608 @@ -44,6 +44,7 @@ static struct usb_serial_driver moto_device = {
15609  		.name =		"moto-modem",
15610  	},
15611  	.id_table =		id_table,
15612 +	.usb_driver =		&moto_driver,
15613  	.num_ports =		1,
15614  };
15615  
15616 diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
15617 index ed01f3b..9ff19c8 100644
15618 --- a/drivers/usb/serial/opticon.c
15619 +++ b/drivers/usb/serial/opticon.c
15620 @@ -96,8 +96,8 @@ static void opticon_bulk_callback(struct urb *urb)
15621  			/* real data, send it to the tty layer */
15622  			tty = tty_port_tty_get(&port->port);
15623  			if (tty) {
15624 -				tty_insert_flip_string(tty, data,
15625 -							       data_length);
15626 +				tty_insert_flip_string(tty, data + 2,
15627 +						       data_length);
15628  				tty_flip_buffer_push(tty);
15629  				tty_kref_put(tty);
15630  			}
15631 @@ -130,7 +130,7 @@ exit:
15632  						  priv->bulk_address),
15633  				  priv->bulk_in_buffer, priv->buffer_size,
15634  				  opticon_bulk_callback, priv);
15635 -		result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
15636 +		result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
15637  		if (result)
15638  			dev_err(&port->dev,
15639  			    "%s - failed resubmitting read urb, error %d\n",
15640 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
15641 index c46911a..0dbcf12 100644
15642 --- a/drivers/usb/serial/option.c
15643 +++ b/drivers/usb/serial/option.c
15644 @@ -382,7 +382,16 @@ static void option_instat_callback(struct urb *urb);
15645  #define HAIER_VENDOR_ID				0x201e
15646  #define HAIER_PRODUCT_CE100			0x2009
15647  
15648 -#define CINTERION_VENDOR_ID			0x0681
15649 +/* Cinterion (formerly Siemens) products */
15650 +#define SIEMENS_VENDOR_ID				0x0681
15651 +#define CINTERION_VENDOR_ID				0x1e2d
15652 +#define CINTERION_PRODUCT_HC25_MDM		0x0047
15653 +#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
15654 +#define CINTERION_PRODUCT_HC28_MDM		0x004C
15655 +#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
15656 +#define CINTERION_PRODUCT_EU3_E			0x0051
15657 +#define CINTERION_PRODUCT_EU3_P			0x0052
15658 +#define CINTERION_PRODUCT_PH8			0x0053
15659  
15660  /* Olivetti products */
15661  #define OLIVETTI_VENDOR_ID			0x0b3c
15662 @@ -512,7 +521,7 @@ static const struct usb_device_id option_ids[] = {
15663  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
15664  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
15665  	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
15666 -	{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
15667 +	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
15668  	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
15669  	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
15670  	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
15671 @@ -622,6 +631,7 @@ static const struct usb_device_id option_ids[] = {
15672  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
15673  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
15674  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
15675 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
15676  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
15677  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
15678  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
15679 @@ -633,38 +643,52 @@ static const struct usb_device_id option_ids[] = {
15680  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
15681  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
15682  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
15683 -	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
15684 +	/* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
15685  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
15686  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
15687  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
15688  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
15689  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
15690  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
15691 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
15692  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
15693 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
15694  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
15695 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
15696  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
15697  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
15698 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
15699  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
15700  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
15701 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
15702  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
15703  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
15704 +	/* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
15705  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
15706  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
15707 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
15708  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
15709  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
15710 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
15711  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
15712  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
15713  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
15714  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
15715 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
15716  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
15717 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
15718  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
15719 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
15720  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
15721 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
15722  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
15723 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) },
15724  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
15725 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
15726  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
15727 -	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
15728 -	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
15729 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
15730  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
15731 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
15732  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
15733  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
15734  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
15735 @@ -880,6 +904,8 @@ static const struct usb_device_id option_ids[] = {
15736  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
15737  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
15738  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
15739 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
15740 +	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
15741  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
15742  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
15743  	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
15744 @@ -922,7 +948,17 @@ static const struct usb_device_id option_ids[] = {
15745  	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
15746  	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
15747  	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
15748 -	{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
15749 +	/* Cinterion */
15750 +	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
15751 +	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
15752 +	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
15753 +	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
15754 +	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
15755 +	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
15756 +	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
15757 +	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
15758 +	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
15759 +
15760  	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
15761  	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
15762  	{ } /* Terminating entry */
15763 diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
15764 index e199b0f..1c46a86 100644
15765 --- a/drivers/usb/serial/oti6858.c
15766 +++ b/drivers/usb/serial/oti6858.c
15767 @@ -157,6 +157,7 @@ static struct usb_serial_driver oti6858_device = {
15768  		.name =		"oti6858",
15769  	},
15770  	.id_table =		id_table,
15771 +	.usb_driver =		&oti6858_driver,
15772  	.num_ports =		1,
15773  	.open =			oti6858_open,
15774  	.close =		oti6858_close,
15775 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
15776 index 8ae4c6c..08c9181 100644
15777 --- a/drivers/usb/serial/pl2303.c
15778 +++ b/drivers/usb/serial/pl2303.c
15779 @@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = {
15780  	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
15781  	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
15782  	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
15783 +	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
15784  	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
15785  	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
15786  	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
15787 @@ -677,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
15788  {
15789  
15790  	struct pl2303_private *priv = usb_get_serial_port_data(port);
15791 +	struct tty_struct *tty;
15792  	unsigned long flags;
15793  	u8 status_idx = UART_STATE;
15794  	u8 length = UART_STATE + 1;
15795 +	u8 prev_line_status;
15796  	u16 idv, idp;
15797  
15798  	idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
15799 @@ -701,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
15800  
15801  	/* Save off the uart status for others to look at */
15802  	spin_lock_irqsave(&priv->lock, flags);
15803 +	prev_line_status = priv->line_status;
15804  	priv->line_status = data[status_idx];
15805  	spin_unlock_irqrestore(&priv->lock, flags);
15806  	if (priv->line_status & UART_BREAK_ERROR)
15807  		usb_serial_handle_break(port);
15808  	wake_up_interruptible(&priv->delta_msr_wait);
15809 +
15810 +	tty = tty_port_tty_get(&port->port);
15811 +	if (!tty)
15812 +		return;
15813 +	if ((priv->line_status ^ prev_line_status) & UART_DCD)
15814 +		usb_serial_handle_dcd_change(port, tty,
15815 +				priv->line_status & UART_DCD);
15816 +	tty_kref_put(tty);
15817  }
15818  
15819  static void pl2303_read_int_callback(struct urb *urb)
15820 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
15821 index 43eb9bd..1b025f7 100644
15822 --- a/drivers/usb/serial/pl2303.h
15823 +++ b/drivers/usb/serial/pl2303.h
15824 @@ -21,6 +21,7 @@
15825  #define PL2303_PRODUCT_ID_MMX		0x0612
15826  #define PL2303_PRODUCT_ID_GPRS		0x0609
15827  #define PL2303_PRODUCT_ID_HCR331	0x331a
15828 +#define PL2303_PRODUCT_ID_MOTOROLA	0x0307
15829  
15830  #define ATEN_VENDOR_ID		0x0557
15831  #define ATEN_VENDOR_ID2		0x0547
15832 diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
15833 index 214a3e5..30b73e6 100644
15834 --- a/drivers/usb/serial/qcaux.c
15835 +++ b/drivers/usb/serial/qcaux.c
15836 @@ -36,6 +36,7 @@
15837  #define UTSTARCOM_PRODUCT_UM175_V1		0x3712
15838  #define UTSTARCOM_PRODUCT_UM175_V2		0x3714
15839  #define UTSTARCOM_PRODUCT_UM175_ALLTEL		0x3715
15840 +#define PANTECH_PRODUCT_UML290_VZW		0x3718
15841  
15842  /* CMOTECH devices */
15843  #define CMOTECH_VENDOR_ID			0x16d8
15844 @@ -66,6 +67,7 @@ static struct usb_device_id id_table[] = {
15845  	{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
15846  	{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
15847  	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
15848 +	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
15849  	{ },
15850  };
15851  MODULE_DEVICE_TABLE(usb, id_table);
15852 @@ -84,6 +86,7 @@ static struct usb_serial_driver qcaux_device = {
15853  		.name =		"qcaux",
15854  	},
15855  	.id_table =		id_table,
15856 +	.usb_driver =		&qcaux_driver,
15857  	.num_ports =		1,
15858  };
15859  
15860 diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
15861 index cb8195c..74cd4cc 100644
15862 --- a/drivers/usb/serial/siemens_mpi.c
15863 +++ b/drivers/usb/serial/siemens_mpi.c
15864 @@ -42,6 +42,7 @@ static struct usb_serial_driver siemens_usb_mpi_device = {
15865  		.name =		"siemens_mpi",
15866  	},
15867  	.id_table =		id_table,
15868 +	.usb_driver =		&siemens_usb_mpi_driver,
15869  	.num_ports =		1,
15870  };
15871  
15872 diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
15873 index 329d311..f88bc51 100644
15874 --- a/drivers/usb/serial/spcp8x5.c
15875 +++ b/drivers/usb/serial/spcp8x5.c
15876 @@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg {
15877  
15878  /* how come ??? */
15879  #define UART_STATE			0x08
15880 -#define UART_STATE_TRANSIENT_MASK	0x74
15881 +#define UART_STATE_TRANSIENT_MASK	0x75
15882  #define UART_DCD			0x01
15883  #define UART_DSR			0x02
15884  #define UART_BREAK_ERROR		0x04
15885 @@ -526,6 +526,10 @@ static void spcp8x5_process_read_urb(struct urb *urb)
15886  		/* overrun is special, not associated with a char */
15887  		if (status & UART_OVERRUN_ERROR)
15888  			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
15889 +
15890 +		if (status & UART_DCD)
15891 +			usb_serial_handle_dcd_change(port, tty,
15892 +				   priv->line_status & MSR_STATUS_LINE_DCD);
15893  	}
15894  
15895  	tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
15896 @@ -646,6 +650,7 @@ static struct usb_serial_driver spcp8x5_device = {
15897  		.name =		"SPCP8x5",
15898  	},
15899  	.id_table		= id_table,
15900 +	.usb_driver		= &spcp8x5_driver,
15901  	.num_ports		= 1,
15902  	.open 			= spcp8x5_open,
15903  	.dtr_rts		= spcp8x5_dtr_rts,
15904 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
15905 index 90979a1..c58ef54 100644
15906 --- a/drivers/usb/serial/ti_usb_3410_5052.c
15907 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
15908 @@ -365,9 +365,9 @@ failed_1port:
15909  
15910  static void __exit ti_exit(void)
15911  {
15912 +	usb_deregister(&ti_usb_driver);
15913  	usb_serial_deregister(&ti_1port_device);
15914  	usb_serial_deregister(&ti_2port_device);
15915 -	usb_deregister(&ti_usb_driver);
15916  }
15917  
15918  
15919 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
15920 index 7a2177c..6afd8e8 100644
15921 --- a/drivers/usb/serial/usb-serial.c
15922 +++ b/drivers/usb/serial/usb-serial.c
15923 @@ -52,6 +52,7 @@ static struct usb_driver usb_serial_driver = {
15924  	.suspend =	usb_serial_suspend,
15925  	.resume =	usb_serial_resume,
15926  	.no_dynamic_id = 	1,
15927 +	.supports_autosuspend =	1,
15928  };
15929  
15930  /* There is no MODULE_DEVICE_TABLE for usbserial.c.  Instead
15931 @@ -1334,6 +1335,12 @@ int usb_serial_register(struct usb_serial_driver *driver)
15932  
15933  	if (!driver->description)
15934  		driver->description = driver->driver.name;
15935 +	if (!driver->usb_driver) {
15936 +		WARN(1, "Serial driver %s has no usb_driver\n",
15937 +				driver->description);
15938 +		return -EINVAL;
15939 +	}
15940 +	driver->usb_driver->supports_autosuspend = 1;
15941  
15942  	/* Add this device to our list of devices */
15943  	mutex_lock(&table_lock);
15944 diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
15945 index f2ed6a3..95a8214 100644
15946 --- a/drivers/usb/serial/usb_debug.c
15947 +++ b/drivers/usb/serial/usb_debug.c
15948 @@ -75,6 +75,7 @@ static struct usb_serial_driver debug_device = {
15949  		.name =		"debug",
15950  	},
15951  	.id_table =		id_table,
15952 +	.usb_driver =		&debug_driver,
15953  	.num_ports =		1,
15954  	.bulk_out_size =	USB_DEBUG_MAX_PACKET_SIZE,
15955  	.break_ctl =		usb_debug_break_ctl,
15956 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
15957 index eb76aae..15a5d89 100644
15958 --- a/drivers/usb/serial/visor.c
15959 +++ b/drivers/usb/serial/visor.c
15960 @@ -606,6 +606,10 @@ static int treo_attach(struct usb_serial *serial)
15961  
15962  static int clie_5_attach(struct usb_serial *serial)
15963  {
15964 +	struct usb_serial_port *port;
15965 +	unsigned int pipe;
15966 +	int j;
15967 +
15968  	dbg("%s", __func__);
15969  
15970  	/* TH55 registers 2 ports.
15971 @@ -621,9 +625,14 @@ static int clie_5_attach(struct usb_serial *serial)
15972  		return -1;
15973  
15974  	/* port 0 now uses the modified endpoint Address */
15975 -	serial->port[0]->bulk_out_endpointAddress =
15976 +	port = serial->port[0];
15977 +	port->bulk_out_endpointAddress =
15978  				serial->port[1]->bulk_out_endpointAddress;
15979  
15980 +	pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
15981 +	for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
15982 +		port->write_urbs[j]->pipe = pipe;
15983 +
15984  	return 0;
15985  }
15986  
15987 diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
15988 index 57fc2f5..ceba512 100644
15989 --- a/drivers/usb/storage/sierra_ms.c
15990 +++ b/drivers/usb/storage/sierra_ms.c
15991 @@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
15992  	}
15993  	return result;
15994  }
15995 -static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
15996 +static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
15997  
15998  int sierra_ms_init(struct us_data *us)
15999  {
16000 diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
16001 index 44be6d7..fba2824 100644
16002 --- a/drivers/usb/storage/unusual_cypress.h
16003 +++ b/drivers/usb/storage/unusual_cypress.h
16004 @@ -31,4 +31,9 @@ UNUSUAL_DEV(  0x04b4, 0x6831, 0x0000, 0x9999,
16005  		"Cypress ISD-300LP",
16006  		US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
16007  
16008 +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
16009 +		"Super Top",
16010 +		"USB 2.0  SATA BRIDGE",
16011 +		US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
16012 +
16013  #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
16014 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
16015 index 2c897ee..b0c0a33 100644
16016 --- a/drivers/usb/storage/unusual_devs.h
16017 +++ b/drivers/usb/storage/unusual_devs.h
16018 @@ -481,6 +481,13 @@ UNUSUAL_DEV(  0x04e8, 0x507c, 0x0220, 0x0220,
16019  		US_SC_DEVICE, US_PR_DEVICE, NULL,
16020  		US_FL_MAX_SECTORS_64),
16021  
16022 +/* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */
16023 +UNUSUAL_DEV(  0x04e8, 0x5122, 0x0000, 0x9999,
16024 +		"Samsung",
16025 +		"YP-CP3",
16026 +		US_SC_DEVICE, US_PR_DEVICE, NULL,
16027 +		US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
16028 +
16029  /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
16030   * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
16031   * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
16032 @@ -1036,6 +1043,15 @@ UNUSUAL_DEV(  0x084d, 0x0011, 0x0110, 0x0110,
16033  		US_SC_DEVICE, US_PR_DEVICE, NULL,
16034  		US_FL_BULK32),
16035  
16036 +/* Reported by <ttkspam@free.fr>
16037 + * The device reports a vendor-specific device class, requiring an
16038 + * explicit vendor/product match.
16039 + */
16040 +UNUSUAL_DEV(  0x0851, 0x1542, 0x0002, 0x0002,
16041 +		"MagicPixel",
16042 +		"FW_Omega2",
16043 +		US_SC_DEVICE, US_PR_DEVICE, NULL, 0),
16044 +
16045  /* Andrew Lunn <andrew@lunn.ch>
16046   * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
16047   * on LUN 4.
16048 @@ -1380,6 +1396,13 @@ UNUSUAL_DEV(  0x0f19, 0x0105, 0x0100, 0x0100,
16049  		US_SC_DEVICE, US_PR_DEVICE, NULL,
16050  		US_FL_IGNORE_RESIDUE ),
16051  
16052 +/* Submitted by Nick Holloway */
16053 +UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
16054 +		"VTech",
16055 +		"Kidizoom",
16056 +		US_SC_DEVICE, US_PR_DEVICE, NULL,
16057 +		US_FL_FIX_CAPACITY ),
16058 +
16059  /* Reported by Michael Stattmann <michael@stattmann.com> */
16060  UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
16061  		"Sony Ericsson",
16062 @@ -1859,6 +1882,22 @@ UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
16063  		US_SC_DEVICE, US_PR_DEVICE, NULL,
16064  		US_FL_BAD_SENSE ),
16065  
16066 +/* Patch by Richard Schütz <r.schtz@t-online.de>
16067 + * This external hard drive enclosure uses a JMicron chip which
16068 + * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
16069 +UNUSUAL_DEV(  0x1e68, 0x001b, 0x0000, 0x0000,
16070 +		"TrekStor GmbH & Co. KG",
16071 +		"DataStation maxi g.u",
16072 +		US_SC_DEVICE, US_PR_DEVICE, NULL,
16073 +		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
16074 +
16075 +/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
16076 +UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
16077 +		"Coby Electronics",
16078 +		"MP3 Player",
16079 +		US_SC_DEVICE, US_PR_DEVICE, NULL,
16080 +		US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
16081 +
16082  UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
16083  		"ST",
16084  		"2A",
16085 diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
16086 index 38ffc3f..c06c667 100644
16087 --- a/drivers/video/backlight/88pm860x_bl.c
16088 +++ b/drivers/video/backlight/88pm860x_bl.c
16089 @@ -21,7 +21,7 @@
16090  #define MAX_BRIGHTNESS		(0xFF)
16091  #define MIN_BRIGHTNESS		(0)
16092  
16093 -#define CURRENT_MASK		(0x1F << 1)
16094 +#define CURRENT_BITMASK		(0x1F << 1)
16095  
16096  struct pm860x_backlight_data {
16097  	struct pm860x_chip *chip;
16098 @@ -85,7 +85,7 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
16099  	if ((data->current_brightness == 0) && brightness) {
16100  		if (data->iset) {
16101  			ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
16102 -					      CURRENT_MASK, data->iset);
16103 +					      CURRENT_BITMASK, data->iset);
16104  			if (ret < 0)
16105  				goto out;
16106  		}
16107 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
16108 index e207810..0870329 100644
16109 --- a/drivers/video/backlight/backlight.c
16110 +++ b/drivers/video/backlight/backlight.c
16111 @@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state)
16112  {
16113  	struct backlight_device *bd = to_backlight_device(dev);
16114  
16115 -	if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
16116 -		mutex_lock(&bd->ops_lock);
16117 +	mutex_lock(&bd->ops_lock);
16118 +	if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
16119  		bd->props.state |= BL_CORE_SUSPENDED;
16120  		backlight_update_status(bd);
16121 -		mutex_unlock(&bd->ops_lock);
16122  	}
16123 +	mutex_unlock(&bd->ops_lock);
16124  
16125  	return 0;
16126  }
16127 @@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev)
16128  {
16129  	struct backlight_device *bd = to_backlight_device(dev);
16130  
16131 -	if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
16132 -		mutex_lock(&bd->ops_lock);
16133 +	mutex_lock(&bd->ops_lock);
16134 +	if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
16135  		bd->props.state &= ~BL_CORE_SUSPENDED;
16136  		backlight_update_status(bd);
16137 -		mutex_unlock(&bd->ops_lock);
16138  	}
16139 +	mutex_unlock(&bd->ops_lock);
16140  
16141  	return 0;
16142  }
16143 diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
16144 index e44893e..c2f4e6e 100644
16145 --- a/drivers/video/via/accel.c
16146 +++ b/drivers/video/via/accel.c
16147 @@ -283,11 +283,12 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
16148  		writel(tmp, engine + 0x1C);
16149  	}
16150  
16151 -	if (op != VIA_BITBLT_COLOR)
16152 +	if (op == VIA_BITBLT_FILL) {
16153 +		writel(fg_color, engine + 0x58);
16154 +	} else if (op == VIA_BITBLT_MONO) {
16155  		writel(fg_color, engine + 0x4C);
16156 -
16157 -	if (op == VIA_BITBLT_MONO)
16158  		writel(bg_color, engine + 0x50);
16159 +	}
16160  
16161  	if (op == VIA_BITBLT_FILL)
16162  		ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
16163 diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
16164 index da9e4ca..021112e 100644
16165 --- a/drivers/video/via/via_i2c.c
16166 +++ b/drivers/video/via/via_i2c.c
16167 @@ -114,6 +114,7 @@ static void via_i2c_setsda(void *data, int state)
16168  
16169  int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
16170  {
16171 +	int ret;
16172  	u8 mm1[] = {0x00};
16173  	struct i2c_msg msgs[2];
16174  
16175 @@ -126,11 +127,18 @@ int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
16176  	mm1[0] = index;
16177  	msgs[0].len = 1; msgs[1].len = 1;
16178  	msgs[0].buf = mm1; msgs[1].buf = pdata;
16179 -	return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
16180 +	ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
16181 +	if (ret == 2)
16182 +		ret = 0;
16183 +	else if (ret >= 0)
16184 +		ret = -EIO;
16185 +
16186 +	return ret;
16187  }
16188  
16189  int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
16190  {
16191 +	int ret;
16192  	u8 msg[2] = { index, data };
16193  	struct i2c_msg msgs;
16194  
16195 @@ -140,11 +148,18 @@ int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
16196  	msgs.addr = slave_addr / 2;
16197  	msgs.len = 2;
16198  	msgs.buf = msg;
16199 -	return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
16200 +	ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
16201 +	if (ret == 1)
16202 +		ret = 0;
16203 +	else if (ret >= 0)
16204 +		ret = -EIO;
16205 +
16206 +	return ret;
16207  }
16208  
16209  int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
16210  {
16211 +	int ret;
16212  	u8 mm1[] = {0x00};
16213  	struct i2c_msg msgs[2];
16214  
16215 @@ -156,7 +171,13 @@ int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len
16216  	mm1[0] = index;
16217  	msgs[0].len = 1; msgs[1].len = buff_len;
16218  	msgs[0].buf = mm1; msgs[1].buf = buff;
16219 -	return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
16220 +	ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
16221 +	if (ret == 2)
16222 +		ret = 0;
16223 +	else if (ret >= 0)
16224 +		ret = -EIO;
16225 +
16226 +	return ret;
16227  }
16228  
16229  /*
16230 diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
16231 index ef8d9d5..4fb5b2b 100644
16232 --- a/drivers/virtio/virtio_pci.c
16233 +++ b/drivers/virtio/virtio_pci.c
16234 @@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = {
16235  
16236  MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
16237  
16238 -/* A PCI device has it's own struct device and so does a virtio device so
16239 - * we create a place for the virtio devices to show up in sysfs.  I think it
16240 - * would make more sense for virtio to not insist on having it's own device. */
16241 -static struct device *virtio_pci_root;
16242 -
16243  /* Convert a generic virtio device to our structure */
16244  static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
16245  {
16246 @@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
16247  	if (vp_dev == NULL)
16248  		return -ENOMEM;
16249  
16250 -	vp_dev->vdev.dev.parent = virtio_pci_root;
16251 +	vp_dev->vdev.dev.parent = &pci_dev->dev;
16252  	vp_dev->vdev.dev.release = virtio_pci_release_dev;
16253  	vp_dev->vdev.config = &virtio_pci_config_ops;
16254  	vp_dev->pci_dev = pci_dev;
16255 @@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = {
16256  
16257  static int __init virtio_pci_init(void)
16258  {
16259 -	int err;
16260 -
16261 -	virtio_pci_root = root_device_register("virtio-pci");
16262 -	if (IS_ERR(virtio_pci_root))
16263 -		return PTR_ERR(virtio_pci_root);
16264 -
16265 -	err = pci_register_driver(&virtio_pci_driver);
16266 -	if (err)
16267 -		root_device_unregister(virtio_pci_root);
16268 -
16269 -	return err;
16270 +	return pci_register_driver(&virtio_pci_driver);
16271  }
16272  
16273  module_init(virtio_pci_init);
16274 @@ -735,7 +720,6 @@ module_init(virtio_pci_init);
16275  static void __exit virtio_pci_exit(void)
16276  {
16277  	pci_unregister_driver(&virtio_pci_driver);
16278 -	root_device_unregister(virtio_pci_root);
16279  }
16280  
16281  module_exit(virtio_pci_exit);
16282 diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
16283 index 428f8a1..3939e53 100644
16284 --- a/drivers/watchdog/rdc321x_wdt.c
16285 +++ b/drivers/watchdog/rdc321x_wdt.c
16286 @@ -231,7 +231,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
16287  	struct resource *r;
16288  	struct rdc321x_wdt_pdata *pdata;
16289  
16290 -	pdata = pdev->dev.platform_data;
16291 +	pdata = platform_get_drvdata(pdev);
16292  	if (!pdata) {
16293  		dev_err(&pdev->dev, "no platform data supplied\n");
16294  		return -ENODEV;
16295 diff --git a/drivers/xen/events.c b/drivers/xen/events.c
16296 index 13365ba..a68cc625 100644
16297 --- a/drivers/xen/events.c
16298 +++ b/drivers/xen/events.c
16299 @@ -261,7 +261,7 @@ static void init_evtchn_cpu_bindings(void)
16300  	}
16301  #endif
16302  
16303 -	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
16304 +	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
16305  }
16306  
16307  static inline void clear_evtchn(int port)
16308 diff --git a/fs/bio.c b/fs/bio.c
16309 index 8abb2df..4bd454f 100644
16310 --- a/fs/bio.c
16311 +++ b/fs/bio.c
16312 @@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
16313  {
16314  	struct bio *bio;
16315  
16316 +	if (nr_iovecs > UIO_MAXIOV)
16317 +		return NULL;
16318 +
16319  	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
16320  		      gfp_mask);
16321  	if (unlikely(!bio))
16322 @@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd)
16323  static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
16324  					       gfp_t gfp_mask)
16325  {
16326 -	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
16327 +	struct bio_map_data *bmd;
16328  
16329 +	if (iov_count > UIO_MAXIOV)
16330 +		return NULL;
16331 +
16332 +	bmd = kmalloc(sizeof(*bmd), gfp_mask);
16333  	if (!bmd)
16334  		return NULL;
16335  
16336 @@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
16337  		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
16338  		start = uaddr >> PAGE_SHIFT;
16339  
16340 +		/*
16341 +		 * Overflow, abort
16342 +		 */
16343 +		if (end < start)
16344 +			return ERR_PTR(-EINVAL);
16345 +
16346  		nr_pages += end - start;
16347  		len += iov[i].iov_len;
16348  	}
16349 @@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
16350  		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
16351  		unsigned long start = uaddr >> PAGE_SHIFT;
16352  
16353 +		/*
16354 +		 * Overflow, abort
16355 +		 */
16356 +		if (end < start)
16357 +			return ERR_PTR(-EINVAL);
16358 +
16359  		nr_pages += end - start;
16360  		/*
16361  		 * buffer must be aligned to at least hardsector size for now
16362 @@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
16363  		unsigned long start = uaddr >> PAGE_SHIFT;
16364  		const int local_nr_pages = end - start;
16365  		const int page_limit = cur_page + local_nr_pages;
16366 -		
16367 +
16368  		ret = get_user_pages_fast(uaddr, local_nr_pages,
16369  				write_to_vm, &pages[cur_page]);
16370  		if (ret < local_nr_pages) {
16371 diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
16372 index 1d60c65..f110e0e 100644
16373 --- a/fs/cifs/cifsproto.h
16374 +++ b/fs/cifs/cifsproto.h
16375 @@ -107,7 +107,8 @@ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
16376  
16377  extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
16378  				__u16 fileHandle, struct file *file,
16379 -				struct vfsmount *mnt, unsigned int oflags);
16380 +				struct vfsmount *mnt, unsigned int oflags,
16381 +				__u32 oplock);
16382  extern int cifs_posix_open(char *full_path, struct inode **pinode,
16383  				struct super_block *sb,
16384  				int mode, int oflags,
16385 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
16386 index f9ed075..0f947bf 100644
16387 --- a/fs/cifs/dir.c
16388 +++ b/fs/cifs/dir.c
16389 @@ -132,9 +132,9 @@ cifs_bp_rename_retry:
16390  
16391  struct cifsFileInfo *
16392  cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
16393 -		  struct file *file, struct vfsmount *mnt, unsigned int oflags)
16394 +		  struct file *file, struct vfsmount *mnt, unsigned int oflags,
16395 +		  __u32 oplock)
16396  {
16397 -	int oplock = 0;
16398  	struct cifsFileInfo *pCifsFile;
16399  	struct cifsInodeInfo *pCifsInode;
16400  	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
16401 @@ -143,9 +143,6 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
16402  	if (pCifsFile == NULL)
16403  		return pCifsFile;
16404  
16405 -	if (oplockEnabled)
16406 -		oplock = REQ_OPLOCK;
16407 -
16408  	pCifsFile->netfid = fileHandle;
16409  	pCifsFile->pid = current->tgid;
16410  	pCifsFile->pInode = igrab(newinode);
16411 @@ -468,7 +465,7 @@ cifs_create_set_dentry:
16412  		}
16413  
16414  		pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp,
16415 -					       nd->path.mnt, oflags);
16416 +					       nd->path.mnt, oflags, oplock);
16417  		if (pfile_info == NULL) {
16418  			fput(filp);
16419  			CIFSSMBClose(xid, tcon, fileHandle);
16420 @@ -729,7 +726,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
16421  
16422  			cfile = cifs_new_fileinfo(newInode, fileHandle, filp,
16423  						  nd->path.mnt,
16424 -						  nd->intent.open.flags);
16425 +						  nd->intent.open.flags,
16426 +						  oplock);
16427  			if (cfile == NULL) {
16428  				fput(filp);
16429  				CIFSSMBClose(xid, pTcon, fileHandle);
16430 diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
16431 index 0eb8702..548f062 100644
16432 --- a/fs/cifs/dns_resolve.c
16433 +++ b/fs/cifs/dns_resolve.c
16434 @@ -66,7 +66,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
16435  	/* Search for server name delimiter */
16436  	sep = memchr(hostname, '\\', len);
16437  	if (sep)
16438 -		len = sep - unc;
16439 +		len = sep - hostname;
16440  	else
16441  		cFYI(1, "%s: probably server name is whole unc: %s",
16442  		     __func__, unc);
16443 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
16444 index de748c6..681761c 100644
16445 --- a/fs/cifs/file.c
16446 +++ b/fs/cifs/file.c
16447 @@ -277,7 +277,7 @@ int cifs_open(struct inode *inode, struct file *file)
16448  
16449  			pCifsFile = cifs_new_fileinfo(inode, netfid, file,
16450  							file->f_path.mnt,
16451 -							oflags);
16452 +							oflags, oplock);
16453  			if (pCifsFile == NULL) {
16454  				CIFSSMBClose(xid, tcon, netfid);
16455  				rc = -ENOMEM;
16456 @@ -370,7 +370,7 @@ int cifs_open(struct inode *inode, struct file *file)
16457  		goto out;
16458  
16459  	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
16460 -					file->f_flags);
16461 +					file->f_flags, oplock);
16462  	if (pCifsFile == NULL) {
16463  		rc = -ENOMEM;
16464  		goto out;
16465 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
16466 index 53cce8c..00d1ff3 100644
16467 --- a/fs/cifs/inode.c
16468 +++ b/fs/cifs/inode.c
16469 @@ -835,8 +835,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
16470  		rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
16471  						xid, NULL);
16472  
16473 -	if (!inode)
16474 -		return ERR_PTR(rc);
16475 +	if (!inode) {
16476 +		inode = ERR_PTR(rc);
16477 +		goto out;
16478 +	}
16479  
16480  #ifdef CONFIG_CIFS_FSCACHE
16481  	/* populate tcon->resource_id */
16482 @@ -852,13 +854,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
16483  		inode->i_uid = cifs_sb->mnt_uid;
16484  		inode->i_gid = cifs_sb->mnt_gid;
16485  	} else if (rc) {
16486 -		kfree(full_path);
16487 -		_FreeXid(xid);
16488  		iget_failed(inode);
16489 -		return ERR_PTR(rc);
16490 +		inode = ERR_PTR(rc);
16491  	}
16492  
16493 -
16494 +out:
16495  	kfree(full_path);
16496  	/* can not call macro FreeXid here since in a void func
16497  	 * TODO: This is no longer true
16498 diff --git a/fs/compat.c b/fs/compat.c
16499 index 0644a15..8b41dcd 100644
16500 --- a/fs/compat.c
16501 +++ b/fs/compat.c
16502 @@ -1378,6 +1378,10 @@ static int compat_count(compat_uptr_t __user *argv, int max)
16503  			argv++;
16504  			if (i++ >= max)
16505  				return -E2BIG;
16506 +
16507 +			if (fatal_signal_pending(current))
16508 +				return -ERESTARTNOHAND;
16509 +			cond_resched();
16510  		}
16511  	}
16512  	return i;
16513 @@ -1419,6 +1423,12 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
16514  		while (len > 0) {
16515  			int offset, bytes_to_copy;
16516  
16517 +			if (fatal_signal_pending(current)) {
16518 +				ret = -ERESTARTNOHAND;
16519 +				goto out;
16520 +			}
16521 +			cond_resched();
16522 +
16523  			offset = pos % PAGE_SIZE;
16524  			if (offset == 0)
16525  				offset = PAGE_SIZE;
16526 @@ -1435,18 +1445,8 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
16527  			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
16528  				struct page *page;
16529  
16530 -#ifdef CONFIG_STACK_GROWSUP
16531 -				ret = expand_stack_downwards(bprm->vma, pos);
16532 -				if (ret < 0) {
16533 -					/* We've exceed the stack rlimit. */
16534 -					ret = -E2BIG;
16535 -					goto out;
16536 -				}
16537 -#endif
16538 -				ret = get_user_pages(current, bprm->mm, pos,
16539 -						     1, 1, 1, &page, NULL);
16540 -				if (ret <= 0) {
16541 -					/* We've exceed the stack rlimit. */
16542 +				page = get_arg_page(bprm, pos, 1);
16543 +				if (!page) {
16544  					ret = -E2BIG;
16545  					goto out;
16546  				}
16547 @@ -1567,8 +1567,10 @@ int compat_do_execve(char * filename,
16548  	return retval;
16549  
16550  out:
16551 -	if (bprm->mm)
16552 +	if (bprm->mm) {
16553 +		acct_arg_size(bprm, 0);
16554  		mmput(bprm->mm);
16555 +	}
16556  
16557  out_file:
16558  	if (bprm->file) {
16559 diff --git a/fs/direct-io.c b/fs/direct-io.c
16560 index 48d74c7..92030e3 100644
16561 --- a/fs/direct-io.c
16562 +++ b/fs/direct-io.c
16563 @@ -325,12 +325,16 @@ void dio_end_io(struct bio *bio, int error)
16564  }
16565  EXPORT_SYMBOL_GPL(dio_end_io);
16566  
16567 -static int
16568 +static void
16569  dio_bio_alloc(struct dio *dio, struct block_device *bdev,
16570  		sector_t first_sector, int nr_vecs)
16571  {
16572  	struct bio *bio;
16573  
16574 +	/*
16575 +	 * bio_alloc() is guaranteed to return a bio when called with
16576 +	 * __GFP_WAIT and we request a valid number of vectors.
16577 +	 */
16578  	bio = bio_alloc(GFP_KERNEL, nr_vecs);
16579  
16580  	bio->bi_bdev = bdev;
16581 @@ -342,7 +346,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
16582  
16583  	dio->bio = bio;
16584  	dio->logical_offset_in_bio = dio->cur_page_fs_offset;
16585 -	return 0;
16586  }
16587  
16588  /*
16589 @@ -583,8 +586,9 @@ static int dio_new_bio(struct dio *dio, sector_t start_sector)
16590  		goto out;
16591  	sector = start_sector << (dio->blkbits - 9);
16592  	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
16593 +	nr_pages = min(nr_pages, BIO_MAX_PAGES);
16594  	BUG_ON(nr_pages <= 0);
16595 -	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
16596 +	dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
16597  	dio->boundary = 0;
16598  out:
16599  	return ret;
16600 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
16601 index 3fbc942..9d1a22d 100644
16602 --- a/fs/ecryptfs/inode.c
16603 +++ b/fs/ecryptfs/inode.c
16604 @@ -32,6 +32,7 @@
16605  #include <linux/crypto.h>
16606  #include <linux/fs_stack.h>
16607  #include <linux/slab.h>
16608 +#include <linux/xattr.h>
16609  #include <asm/unaligned.h>
16610  #include "ecryptfs_kernel.h"
16611  
16612 @@ -70,15 +71,19 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
16613  	struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
16614  	struct dentry *dentry_save;
16615  	struct vfsmount *vfsmount_save;
16616 +	unsigned int flags_save;
16617  	int rc;
16618  
16619  	dentry_save = nd->path.dentry;
16620  	vfsmount_save = nd->path.mnt;
16621 +	flags_save = nd->flags;
16622  	nd->path.dentry = lower_dentry;
16623  	nd->path.mnt = lower_mnt;
16624 +	nd->flags &= ~LOOKUP_OPEN;
16625  	rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
16626  	nd->path.dentry = dentry_save;
16627  	nd->path.mnt = vfsmount_save;
16628 +	nd->flags = flags_save;
16629  	return rc;
16630  }
16631  
16632 @@ -1108,10 +1113,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
16633  		rc = -EOPNOTSUPP;
16634  		goto out;
16635  	}
16636 -	mutex_lock(&lower_dentry->d_inode->i_mutex);
16637 -	rc = lower_dentry->d_inode->i_op->setxattr(lower_dentry, name, value,
16638 -						   size, flags);
16639 -	mutex_unlock(&lower_dentry->d_inode->i_mutex);
16640 +
16641 +	rc = vfs_setxattr(lower_dentry, name, value, size, flags);
16642  out:
16643  	return rc;
16644  }
16645 diff --git a/fs/exec.c b/fs/exec.c
16646 index 56536ad..4b8f716 100644
16647 --- a/fs/exec.c
16648 +++ b/fs/exec.c
16649 @@ -159,7 +159,26 @@ out:
16650  
16651  #ifdef CONFIG_MMU
16652  
16653 -static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
16654 +void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
16655 +{
16656 +	struct mm_struct *mm = current->mm;
16657 +	long diff = (long)(pages - bprm->vma_pages);
16658 +
16659 +	if (!mm || !diff)
16660 +		return;
16661 +
16662 +	bprm->vma_pages = pages;
16663 +
16664 +#ifdef SPLIT_RSS_COUNTING
16665 +	add_mm_counter(mm, MM_ANONPAGES, diff);
16666 +#else
16667 +	spin_lock(&mm->page_table_lock);
16668 +	add_mm_counter(mm, MM_ANONPAGES, diff);
16669 +	spin_unlock(&mm->page_table_lock);
16670 +#endif
16671 +}
16672 +
16673 +struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
16674  		int write)
16675  {
16676  	struct page *page;
16677 @@ -181,6 +200,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
16678  		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
16679  		struct rlimit *rlim;
16680  
16681 +		acct_arg_size(bprm, size / PAGE_SIZE);
16682 +
16683  		/*
16684  		 * We've historically supported up to 32 pages (ARG_MAX)
16685  		 * of argument strings even with small stacks
16686 @@ -249,6 +270,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
16687  	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
16688  	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
16689  	INIT_LIST_HEAD(&vma->anon_vma_chain);
16690 +
16691 +	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
16692 +	if (err)
16693 +		goto err;
16694 +
16695  	err = insert_vm_struct(mm, vma);
16696  	if (err)
16697  		goto err;
16698 @@ -271,7 +297,11 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
16699  
16700  #else
16701  
16702 -static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
16703 +void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
16704 +{
16705 +}
16706 +
16707 +struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
16708  		int write)
16709  {
16710  	struct page *page;
16711 @@ -994,6 +1024,7 @@ int flush_old_exec(struct linux_binprm * bprm)
16712  	/*
16713  	 * Release all of the old mmap stuff
16714  	 */
16715 +	acct_arg_size(bprm, 0);
16716  	retval = exec_mmap(bprm->mm);
16717  	if (retval)
16718  		goto out;
16719 @@ -1419,8 +1450,10 @@ int do_execve(const char * filename,
16720  	return retval;
16721  
16722  out:
16723 -	if (bprm->mm)
16724 -		mmput (bprm->mm);
16725 +	if (bprm->mm) {
16726 +		acct_arg_size(bprm, 0);
16727 +		mmput(bprm->mm);
16728 +	}
16729  
16730  out_file:
16731  	if (bprm->file) {
16732 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
16733 index 4b8debe..8705e36 100644
16734 --- a/fs/ext4/inode.c
16735 +++ b/fs/ext4/inode.c
16736 @@ -4530,6 +4530,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
16737  					(__le32 *) bh->b_data,
16738  					(__le32 *) bh->b_data + addr_per_block,
16739  					depth);
16740 +			brelse(bh);
16741  
16742  			/*
16743  			 * Everything below this this pointer has been
16744 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
16745 index 2614774..751997d 100644
16746 --- a/fs/ext4/super.c
16747 +++ b/fs/ext4/super.c
16748 @@ -719,6 +719,7 @@ static void ext4_put_super(struct super_block *sb)
16749  			ext4_abort(sb, "Couldn't clean up the journal");
16750  	}
16751  
16752 +	del_timer(&sbi->s_err_report);
16753  	ext4_release_system_zone(sb);
16754  	ext4_mb_release(sb);
16755  	ext4_ext_release(sb);
16756 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
16757 index c822458..6c2717d 100644
16758 --- a/fs/fuse/file.c
16759 +++ b/fs/fuse/file.c
16760 @@ -13,6 +13,7 @@
16761  #include <linux/kernel.h>
16762  #include <linux/sched.h>
16763  #include <linux/module.h>
16764 +#include <linux/compat.h>
16765  
16766  static const struct file_operations fuse_direct_io_file_operations;
16767  
16768 @@ -134,6 +135,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
16769  void fuse_finish_open(struct inode *inode, struct file *file)
16770  {
16771  	struct fuse_file *ff = file->private_data;
16772 +	struct fuse_conn *fc = get_fuse_conn(inode);
16773  
16774  	if (ff->open_flags & FOPEN_DIRECT_IO)
16775  		file->f_op = &fuse_direct_io_file_operations;
16776 @@ -141,6 +143,15 @@ void fuse_finish_open(struct inode *inode, struct file *file)
16777  		invalidate_inode_pages2(inode->i_mapping);
16778  	if (ff->open_flags & FOPEN_NONSEEKABLE)
16779  		nonseekable_open(inode, file);
16780 +	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
16781 +		struct fuse_inode *fi = get_fuse_inode(inode);
16782 +
16783 +		spin_lock(&fc->lock);
16784 +		fi->attr_version = ++fc->attr_version;
16785 +		i_size_write(inode, 0);
16786 +		spin_unlock(&fc->lock);
16787 +		fuse_invalidate_attr(inode);
16788 +	}
16789  }
16790  
16791  int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
16792 @@ -1617,6 +1628,58 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
16793  	return 0;
16794  }
16795  
16796 +/* Make sure iov_length() won't overflow */
16797 +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
16798 +{
16799 +	size_t n;
16800 +	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
16801 +
16802 +	for (n = 0; n < count; n++) {
16803 +		if (iov->iov_len > (size_t) max)
16804 +			return -ENOMEM;
16805 +		max -= iov->iov_len;
16806 +	}
16807 +	return 0;
16808 +}
16809 +
16810 +/*
16811 + * CUSE servers compiled on 32bit broke on 64bit kernels because the
16812 + * ABI was defined to be 'struct iovec' which is different on 32bit
16813 + * and 64bit.  Fortunately we can determine which structure the server
16814 + * used from the size of the reply.
16815 + */
16816 +static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
16817 +				 size_t transferred, unsigned count,
16818 +				 bool is_compat)
16819 +{
16820 +#ifdef CONFIG_COMPAT
16821 +	if (count * sizeof(struct compat_iovec) == transferred) {
16822 +		struct compat_iovec *ciov = src;
16823 +		unsigned i;
16824 +
16825 +		/*
16826 +		 * With this interface a 32bit server cannot support
16827 +		 * non-compat (i.e. ones coming from 64bit apps) ioctl
16828 +		 * requests
16829 +		 */
16830 +		if (!is_compat)
16831 +			return -EINVAL;
16832 +
16833 +		for (i = 0; i < count; i++) {
16834 +			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
16835 +			dst[i].iov_len = ciov[i].iov_len;
16836 +		}
16837 +		return 0;
16838 +	}
16839 +#endif
16840 +
16841 +	if (count * sizeof(struct iovec) != transferred)
16842 +		return -EIO;
16843 +
16844 +	memcpy(dst, src, transferred);
16845 +	return 0;
16846 +}
16847 +
16848  /*
16849   * For ioctls, there is no generic way to determine how much memory
16850   * needs to be read and/or written.  Furthermore, ioctls are allowed
16851 @@ -1798,18 +1861,25 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
16852  		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
16853  			goto out;
16854  
16855 -		err = -EIO;
16856 -		if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
16857 -			goto out;
16858 -
16859 -		/* okay, copy in iovs and retry */
16860  		vaddr = kmap_atomic(pages[0], KM_USER0);
16861 -		memcpy(page_address(iov_page), vaddr, transferred);
16862 +		err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
16863 +					    transferred, in_iovs + out_iovs,
16864 +					    (flags & FUSE_IOCTL_COMPAT) != 0);
16865  		kunmap_atomic(vaddr, KM_USER0);
16866 +		if (err)
16867 +			goto out;
16868  
16869  		in_iov = page_address(iov_page);
16870  		out_iov = in_iov + in_iovs;
16871  
16872 +		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
16873 +		if (err)
16874 +			goto out;
16875 +
16876 +		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
16877 +		if (err)
16878 +			goto out;
16879 +
16880  		goto retry;
16881  	}
16882  
16883 diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
16884 index 6bbd75c..3ccb4e4 100644
16885 --- a/fs/hostfs/hostfs.h
16886 +++ b/fs/hostfs/hostfs.h
16887 @@ -96,7 +96,6 @@ extern int rename_file(char *from, char *to);
16888  extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
16889  		     long long *bfree_out, long long *bavail_out,
16890  		     long long *files_out, long long *ffree_out,
16891 -		     void *fsid_out, int fsid_size, long *namelen_out,
16892 -		     long *spare_out);
16893 +		     void *fsid_out, int fsid_size, long *namelen_out);
16894  
16895  #endif
16896 diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
16897 index f7dc9b5..cd7c939 100644
16898 --- a/fs/hostfs/hostfs_kern.c
16899 +++ b/fs/hostfs/hostfs_kern.c
16900 @@ -217,7 +217,7 @@ int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
16901  	err = do_statfs(dentry->d_sb->s_fs_info,
16902  			&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
16903  			&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
16904 -			&sf->f_namelen, sf->f_spare);
16905 +			&sf->f_namelen);
16906  	if (err)
16907  		return err;
16908  	sf->f_blocks = f_blocks;
16909 diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
16910 index 6777aa0..8d02683 100644
16911 --- a/fs/hostfs/hostfs_user.c
16912 +++ b/fs/hostfs/hostfs_user.c
16913 @@ -364,8 +364,7 @@ int rename_file(char *from, char *to)
16914  int do_statfs(char *root, long *bsize_out, long long *blocks_out,
16915  	      long long *bfree_out, long long *bavail_out,
16916  	      long long *files_out, long long *ffree_out,
16917 -	      void *fsid_out, int fsid_size, long *namelen_out,
16918 -	      long *spare_out)
16919 +	      void *fsid_out, int fsid_size, long *namelen_out)
16920  {
16921  	struct statfs64 buf;
16922  	int err;
16923 @@ -384,10 +383,6 @@ int do_statfs(char *root, long *bsize_out, long long *blocks_out,
16924  	       sizeof(buf.f_fsid) > fsid_size ? fsid_size :
16925  	       sizeof(buf.f_fsid));
16926  	*namelen_out = buf.f_namelen;
16927 -	spare_out[0] = buf.f_spare[0];
16928 -	spare_out[1] = buf.f_spare[1];
16929 -	spare_out[2] = buf.f_spare[2];
16930 -	spare_out[3] = buf.f_spare[3];
16931 -	spare_out[4] = buf.f_spare[4];
16932 +
16933  	return 0;
16934  }
16935 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
16936 index b9c3c43..98e8e35 100644
16937 --- a/fs/nfs/delegation.c
16938 +++ b/fs/nfs/delegation.c
16939 @@ -24,8 +24,6 @@
16940  
16941  static void nfs_do_free_delegation(struct nfs_delegation *delegation)
16942  {
16943 -	if (delegation->cred)
16944 -		put_rpccred(delegation->cred);
16945  	kfree(delegation);
16946  }
16947  
16948 @@ -38,6 +36,10 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
16949  
16950  static void nfs_free_delegation(struct nfs_delegation *delegation)
16951  {
16952 +	if (delegation->cred) {
16953 +		put_rpccred(delegation->cred);
16954 +		delegation->cred = NULL;
16955 +	}
16956  	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
16957  }
16958  
16959 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
16960 index 064a809..e0e9d49 100644
16961 --- a/fs/nfs/direct.c
16962 +++ b/fs/nfs/direct.c
16963 @@ -407,15 +407,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
16964  		pos += vec->iov_len;
16965  	}
16966  
16967 +	/*
16968 +	 * If no bytes were started, return the error, and let the
16969 +	 * generic layer handle the completion.
16970 +	 */
16971 +	if (requested_bytes == 0) {
16972 +		nfs_direct_req_release(dreq);
16973 +		return result < 0 ? result : -EIO;
16974 +	}
16975 +
16976  	if (put_dreq(dreq))
16977  		nfs_direct_complete(dreq);
16978 -
16979 -	if (requested_bytes != 0)
16980 -		return 0;
16981 -
16982 -	if (result < 0)
16983 -		return result;
16984 -	return -EIO;
16985 +	return 0;
16986  }
16987  
16988  static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
16989 @@ -841,15 +844,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
16990  		pos += vec->iov_len;
16991  	}
16992  
16993 +	/*
16994 +	 * If no bytes were started, return the error, and let the
16995 +	 * generic layer handle the completion.
16996 +	 */
16997 +	if (requested_bytes == 0) {
16998 +		nfs_direct_req_release(dreq);
16999 +		return result < 0 ? result : -EIO;
17000 +	}
17001 +
17002  	if (put_dreq(dreq))
17003  		nfs_direct_write_complete(dreq, dreq->inode);
17004 -
17005 -	if (requested_bytes != 0)
17006 -		return 0;
17007 -
17008 -	if (result < 0)
17009 -		return result;
17010 -	return -EIO;
17011 +	return 0;
17012  }
17013  
17014  static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
17015 @@ -873,7 +879,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
17016  	dreq->inode = inode;
17017  	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
17018  	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
17019 -	if (dreq->l_ctx != NULL)
17020 +	if (dreq->l_ctx == NULL)
17021  		goto out_release;
17022  	if (!is_sync_kiocb(iocb))
17023  		dreq->iocb = iocb;
17024 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
17025 index 05bf3c0..22a185b 100644
17026 --- a/fs/nfs/file.c
17027 +++ b/fs/nfs/file.c
17028 @@ -551,7 +551,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
17029  	struct file *filp = vma->vm_file;
17030  	struct dentry *dentry = filp->f_path.dentry;
17031  	unsigned pagelen;
17032 -	int ret = -EINVAL;
17033 +	int ret = VM_FAULT_NOPAGE;
17034  	struct address_space *mapping;
17035  
17036  	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
17037 @@ -567,21 +567,20 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
17038  	if (mapping != dentry->d_inode->i_mapping)
17039  		goto out_unlock;
17040  
17041 -	ret = 0;
17042  	pagelen = nfs_page_length(page);
17043  	if (pagelen == 0)
17044  		goto out_unlock;
17045  
17046 -	ret = nfs_flush_incompatible(filp, page);
17047 -	if (ret != 0)
17048 -		goto out_unlock;
17049 +	ret = VM_FAULT_LOCKED;
17050 +	if (nfs_flush_incompatible(filp, page) == 0 &&
17051 +	    nfs_updatepage(filp, page, 0, pagelen) == 0)
17052 +		goto out;
17053  
17054 -	ret = nfs_updatepage(filp, page, 0, pagelen);
17055 +	ret = VM_FAULT_SIGBUS;
17056  out_unlock:
17057 -	if (!ret)
17058 -		return VM_FAULT_LOCKED;
17059  	unlock_page(page);
17060 -	return VM_FAULT_SIGBUS;
17061 +out:
17062 +	return ret;
17063  }
17064  
17065  static const struct vm_operations_struct nfs_file_vm_ops = {
17066 @@ -688,6 +687,7 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
17067  {
17068  	struct inode *inode = filp->f_mapping->host;
17069  	int status = 0;
17070 +	unsigned int saved_type = fl->fl_type;
17071  
17072  	/* Try local locking first */
17073  	posix_test_lock(filp, fl);
17074 @@ -695,6 +695,7 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
17075  		/* found a conflict */
17076  		goto out;
17077  	}
17078 +	fl->fl_type = saved_type;
17079  
17080  	if (nfs_have_delegation(inode, FMODE_READ))
17081  		goto out_noconflict;
17082 diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
17083 index 59047f8..3dde50c 100644
17084 --- a/fs/nfs/mount_clnt.c
17085 +++ b/fs/nfs/mount_clnt.c
17086 @@ -503,13 +503,13 @@ static struct rpc_procinfo mnt3_procedures[] = {
17087  
17088  static struct rpc_version mnt_version1 = {
17089  	.number		= 1,
17090 -	.nrprocs	= 2,
17091 +	.nrprocs	= ARRAY_SIZE(mnt_procedures),
17092  	.procs		= mnt_procedures,
17093  };
17094  
17095  static struct rpc_version mnt_version3 = {
17096  	.number		= 3,
17097 -	.nrprocs	= 2,
17098 +	.nrprocs	= ARRAY_SIZE(mnt3_procedures),
17099  	.procs		= mnt3_procedures,
17100  };
17101  
17102 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
17103 index 089da5b..74aa54e 100644
17104 --- a/fs/nfs/nfs4proc.c
17105 +++ b/fs/nfs/nfs4proc.c
17106 @@ -255,9 +255,6 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
17107  			nfs4_state_mark_reclaim_nograce(clp, state);
17108  			goto do_state_recovery;
17109  		case -NFS4ERR_STALE_STATEID:
17110 -			if (state == NULL)
17111 -				break;
17112 -			nfs4_state_mark_reclaim_reboot(clp, state);
17113  		case -NFS4ERR_STALE_CLIENTID:
17114  		case -NFS4ERR_EXPIRED:
17115  			goto do_state_recovery;
17116 @@ -1120,6 +1117,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
17117  	clear_bit(NFS_DELEGATED_STATE, &state->flags);
17118  	smp_rmb();
17119  	if (state->n_rdwr != 0) {
17120 +		clear_bit(NFS_O_RDWR_STATE, &state->flags);
17121  		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
17122  		if (ret != 0)
17123  			return ret;
17124 @@ -1127,6 +1125,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
17125  			return -ESTALE;
17126  	}
17127  	if (state->n_wronly != 0) {
17128 +		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
17129  		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
17130  		if (ret != 0)
17131  			return ret;
17132 @@ -1134,6 +1133,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
17133  			return -ESTALE;
17134  	}
17135  	if (state->n_rdonly != 0) {
17136 +		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
17137  		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
17138  		if (ret != 0)
17139  			return ret;
17140 @@ -3490,9 +3490,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
17141  			nfs4_state_mark_reclaim_nograce(clp, state);
17142  			goto do_state_recovery;
17143  		case -NFS4ERR_STALE_STATEID:
17144 -			if (state == NULL)
17145 -				break;
17146 -			nfs4_state_mark_reclaim_reboot(clp, state);
17147  		case -NFS4ERR_STALE_CLIENTID:
17148  		case -NFS4ERR_EXPIRED:
17149  			goto do_state_recovery;
17150 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
17151 index 3e2f19b..940cf7c 100644
17152 --- a/fs/nfs/nfs4state.c
17153 +++ b/fs/nfs/nfs4state.c
17154 @@ -1138,16 +1138,14 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
17155  		(void)ops->reclaim_complete(clp);
17156  }
17157  
17158 -static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
17159 +static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
17160  {
17161  	struct nfs4_state_owner *sp;
17162  	struct rb_node *pos;
17163  	struct nfs4_state *state;
17164  
17165  	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
17166 -		return;
17167 -
17168 -	nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
17169 +		return 0;
17170  
17171  	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
17172  		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
17173 @@ -1161,6 +1159,14 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
17174  	}
17175  
17176  	nfs_delegation_reap_unclaimed(clp);
17177 +	return 1;
17178 +}
17179 +
17180 +static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
17181 +{
17182 +	if (!nfs4_state_clear_reclaim_reboot(clp))
17183 +		return;
17184 +	nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
17185  }
17186  
17187  static void nfs_delegation_clear_all(struct nfs_client *clp)
17188 @@ -1187,7 +1193,7 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
17189  		case -NFS4ERR_STALE_CLIENTID:
17190  		case -NFS4ERR_LEASE_MOVED:
17191  			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
17192 -			nfs4_state_end_reclaim_reboot(clp);
17193 +			nfs4_state_clear_reclaim_reboot(clp);
17194  			nfs4_state_start_reclaim_reboot(clp);
17195  			break;
17196  		case -NFS4ERR_EXPIRED:
17197 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
17198 index 9194902..137b549 100644
17199 --- a/fs/nfs/pagelist.c
17200 +++ b/fs/nfs/pagelist.c
17201 @@ -65,6 +65,13 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
17202  	if (req == NULL)
17203  		return ERR_PTR(-ENOMEM);
17204  
17205 +	/* get lock context early so we can deal with alloc failures */
17206 +	req->wb_lock_context = nfs_get_lock_context(ctx);
17207 +	if (req->wb_lock_context == NULL) {
17208 +		nfs_page_free(req);
17209 +		return ERR_PTR(-ENOMEM);
17210 +	}
17211 +
17212  	/* Initialize the request struct. Initially, we assume a
17213  	 * long write-back delay. This will be adjusted in
17214  	 * update_nfs_request below if the region is not locked. */
17215 @@ -79,7 +86,6 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
17216  	req->wb_pgbase	= offset;
17217  	req->wb_bytes   = count;
17218  	req->wb_context = get_nfs_open_context(ctx);
17219 -	req->wb_lock_context = nfs_get_lock_context(ctx);
17220  	kref_init(&req->wb_kref);
17221  	return req;
17222  }
17223 diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
17224 index 2a533a0..7e84a85 100644
17225 --- a/fs/nfsd/nfs3xdr.c
17226 +++ b/fs/nfsd/nfs3xdr.c
17227 @@ -260,9 +260,11 @@ void fill_post_wcc(struct svc_fh *fhp)
17228  	err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
17229  			&fhp->fh_post_attr);
17230  	fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
17231 -	if (err)
17232 +	if (err) {
17233  		fhp->fh_post_saved = 0;
17234 -	else
17235 +		/* Grab the ctime anyway - set_change_info might use it */
17236 +		fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime;
17237 +	} else
17238  		fhp->fh_post_saved = 1;
17239  }
17240  
17241 diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
17242 index 4d476ff..60fce3d 100644
17243 --- a/fs/nfsd/xdr4.h
17244 +++ b/fs/nfsd/xdr4.h
17245 @@ -484,18 +484,17 @@ static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
17246  static inline void
17247  set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
17248  {
17249 -	BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
17250 -	cinfo->atomic = 1;
17251 +	BUG_ON(!fhp->fh_pre_saved);
17252 +	cinfo->atomic = fhp->fh_post_saved;
17253  	cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
17254 -	if (cinfo->change_supported) {
17255 -		cinfo->before_change = fhp->fh_pre_change;
17256 -		cinfo->after_change = fhp->fh_post_change;
17257 -	} else {
17258 -		cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
17259 -		cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
17260 -		cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
17261 -		cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
17262 -	}
17263 +
17264 +	cinfo->before_change = fhp->fh_pre_change;
17265 +	cinfo->after_change = fhp->fh_post_change;
17266 +	cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
17267 +	cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
17268 +	cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
17269 +	cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
17270 +
17271  }
17272  
17273  int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
17274 diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
17275 index 9222633..57878bd 100644
17276 --- a/fs/nilfs2/super.c
17277 +++ b/fs/nilfs2/super.c
17278 @@ -733,7 +733,8 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
17279  		cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
17280  	sbp[0]->s_mtime = cpu_to_le64(get_seconds());
17281  	/* synchronize sbp[1] with sbp[0] */
17282 -	memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
17283 +	if (sbp[1])
17284 +		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
17285  	return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
17286  }
17287  
17288 diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
17289 index bf7f6d7..5b7c6fe 100644
17290 --- a/fs/notify/inotify/inotify_user.c
17291 +++ b/fs/notify/inotify/inotify_user.c
17292 @@ -751,6 +751,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
17293  	if (ret >= 0)
17294  		return ret;
17295  
17296 +	fsnotify_put_group(group);
17297  	atomic_dec(&user->inotify_devs);
17298  out_free_uid:
17299  	free_uid(user);
17300 diff --git a/fs/pipe.c b/fs/pipe.c
17301 index 279eef9..a58d7ee 100644
17302 --- a/fs/pipe.c
17303 +++ b/fs/pipe.c
17304 @@ -382,7 +382,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
17305  			error = ops->confirm(pipe, buf);
17306  			if (error) {
17307  				if (!ret)
17308 -					error = ret;
17309 +					ret = error;
17310  				break;
17311  			}
17312  
17313 @@ -1197,12 +1197,24 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
17314  	return ret;
17315  }
17316  
17317 +/*
17318 + * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
17319 + * location, so checking ->i_pipe is not enough to verify that this is a
17320 + * pipe.
17321 + */
17322 +struct pipe_inode_info *get_pipe_info(struct file *file)
17323 +{
17324 +	struct inode *i = file->f_path.dentry->d_inode;
17325 +
17326 +	return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
17327 +}
17328 +
17329  long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
17330  {
17331  	struct pipe_inode_info *pipe;
17332  	long ret;
17333  
17334 -	pipe = file->f_path.dentry->d_inode->i_pipe;
17335 +	pipe = get_pipe_info(file);
17336  	if (!pipe)
17337  		return -EBADF;
17338  
17339 diff --git a/fs/proc/base.c b/fs/proc/base.c
17340 index 8e4adda..632b907 100644
17341 --- a/fs/proc/base.c
17342 +++ b/fs/proc/base.c
17343 @@ -1526,7 +1526,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
17344  	if (!tmp)
17345  		return -ENOMEM;
17346  
17347 -	pathname = d_path_with_unreachable(path, tmp, PAGE_SIZE);
17348 +	pathname = d_path(path, tmp, PAGE_SIZE);
17349  	len = PTR_ERR(pathname);
17350  	if (IS_ERR(pathname))
17351  		goto out;
17352 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
17353 index 6f37c39..d245cb2 100644
17354 --- a/fs/proc/kcore.c
17355 +++ b/fs/proc/kcore.c
17356 @@ -558,7 +558,7 @@ static int open_kcore(struct inode *inode, struct file *filp)
17357  static const struct file_operations proc_kcore_operations = {
17358  	.read		= read_kcore,
17359  	.open		= open_kcore,
17360 -	.llseek		= generic_file_llseek,
17361 +	.llseek		= default_llseek,
17362  };
17363  
17364  #ifdef CONFIG_MEMORY_HOTPLUG
17365 diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
17366 index 5cbb81e..4131f4a 100644
17367 --- a/fs/reiserfs/ioctl.c
17368 +++ b/fs/reiserfs/ioctl.c
17369 @@ -186,12 +186,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
17370  		return 0;
17371  	}
17372  
17373 -	/* we need to make sure nobody is changing the file size beneath
17374 -	 ** us
17375 -	 */
17376 -	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
17377  	depth = reiserfs_write_lock_once(inode->i_sb);
17378  
17379 +	/* we need to make sure nobody is changing the file size beneath us */
17380 +	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
17381 +
17382  	write_from = inode->i_size & (blocksize - 1);
17383  	/* if we are on a block boundary, we are already unpacked.  */
17384  	if (write_from == 0) {
17385 diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
17386 index 536d697..90d2fcb 100644
17387 --- a/fs/reiserfs/xattr_acl.c
17388 +++ b/fs/reiserfs/xattr_acl.c
17389 @@ -472,7 +472,9 @@ int reiserfs_acl_chmod(struct inode *inode)
17390  		struct reiserfs_transaction_handle th;
17391  		size_t size = reiserfs_xattr_nblocks(inode,
17392  					     reiserfs_acl_size(clone->a_count));
17393 -		reiserfs_write_lock(inode->i_sb);
17394 +		int depth;
17395 +
17396 +		depth = reiserfs_write_lock_once(inode->i_sb);
17397  		error = journal_begin(&th, inode->i_sb, size * 2);
17398  		if (!error) {
17399  			int error2;
17400 @@ -482,7 +484,7 @@ int reiserfs_acl_chmod(struct inode *inode)
17401  			if (error2)
17402  				error = error2;
17403  		}
17404 -		reiserfs_write_unlock(inode->i_sb);
17405 +		reiserfs_write_unlock_once(inode->i_sb, depth);
17406  	}
17407  	posix_acl_release(clone);
17408  	return error;
17409 diff --git a/fs/splice.c b/fs/splice.c
17410 index 8f1dfae..ce2f025 100644
17411 --- a/fs/splice.c
17412 +++ b/fs/splice.c
17413 @@ -1311,18 +1311,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
17414  static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
17415  			       struct pipe_inode_info *opipe,
17416  			       size_t len, unsigned int flags);
17417 -/*
17418 - * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
17419 - * location, so checking ->i_pipe is not enough to verify that this is a
17420 - * pipe.
17421 - */
17422 -static inline struct pipe_inode_info *pipe_info(struct inode *inode)
17423 -{
17424 -	if (S_ISFIFO(inode->i_mode))
17425 -		return inode->i_pipe;
17426 -
17427 -	return NULL;
17428 -}
17429  
17430  /*
17431   * Determine where to splice to/from.
17432 @@ -1336,8 +1324,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
17433  	loff_t offset, *off;
17434  	long ret;
17435  
17436 -	ipipe = pipe_info(in->f_path.dentry->d_inode);
17437 -	opipe = pipe_info(out->f_path.dentry->d_inode);
17438 +	ipipe = get_pipe_info(in);
17439 +	opipe = get_pipe_info(out);
17440  
17441  	if (ipipe && opipe) {
17442  		if (off_in || off_out)
17443 @@ -1555,7 +1543,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
17444  	int error;
17445  	long ret;
17446  
17447 -	pipe = pipe_info(file->f_path.dentry->d_inode);
17448 +	pipe = get_pipe_info(file);
17449  	if (!pipe)
17450  		return -EBADF;
17451  
17452 @@ -1642,7 +1630,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
17453  	};
17454  	long ret;
17455  
17456 -	pipe = pipe_info(file->f_path.dentry->d_inode);
17457 +	pipe = get_pipe_info(file);
17458  	if (!pipe)
17459  		return -EBADF;
17460  
17461 @@ -2022,8 +2010,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
17462  static long do_tee(struct file *in, struct file *out, size_t len,
17463  		   unsigned int flags)
17464  {
17465 -	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
17466 -	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
17467 +	struct pipe_inode_info *ipipe = get_pipe_info(in);
17468 +	struct pipe_inode_info *opipe = get_pipe_info(out);
17469  	int ret = -EINVAL;
17470  
17471  	/*
17472 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
17473 index 883c1d4..40b1f0e 100644
17474 --- a/include/drm/drm_pciids.h
17475 +++ b/include/drm/drm_pciids.h
17476 @@ -28,7 +28,6 @@
17477  	{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
17478  	{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
17479  	{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
17480 -	{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
17481  	{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
17482  	{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
17483  	{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
17484 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
17485 index a065612..64a7114 100644
17486 --- a/include/linux/binfmts.h
17487 +++ b/include/linux/binfmts.h
17488 @@ -29,6 +29,7 @@ struct linux_binprm{
17489  	char buf[BINPRM_BUF_SIZE];
17490  #ifdef CONFIG_MMU
17491  	struct vm_area_struct *vma;
17492 +	unsigned long vma_pages;
17493  #else
17494  # define MAX_ARG_PAGES	32
17495  	struct page *page[MAX_ARG_PAGES];
17496 @@ -59,6 +60,10 @@ struct linux_binprm{
17497  	unsigned long loader, exec;
17498  };
17499  
17500 +extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
17501 +extern struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
17502 +					int write);
17503 +
17504  #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
17505  #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
17506  
17507 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
17508 index 2c54906..f142a7f 100644
17509 --- a/include/linux/blkdev.h
17510 +++ b/include/linux/blkdev.h
17511 @@ -246,7 +246,7 @@ struct queue_limits {
17512  
17513  	unsigned char		misaligned;
17514  	unsigned char		discard_misaligned;
17515 -	unsigned char		no_cluster;
17516 +	unsigned char		cluster;
17517  	signed char		discard_zeroes_data;
17518  };
17519  
17520 @@ -369,7 +369,6 @@ struct request_queue
17521  #endif
17522  };
17523  
17524 -#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
17525  #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
17526  #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
17527  #define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
17528 @@ -392,7 +391,6 @@ struct request_queue
17529  #define QUEUE_FLAG_SECDISCARD  19	/* supports SECDISCARD */
17530  
17531  #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
17532 -				 (1 << QUEUE_FLAG_CLUSTER) |		\
17533  				 (1 << QUEUE_FLAG_STACKABLE)	|	\
17534  				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
17535  				 (1 << QUEUE_FLAG_ADD_RANDOM))
17536 @@ -550,6 +548,11 @@ enum {
17537  
17538  #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
17539  
17540 +static inline unsigned int blk_queue_cluster(struct request_queue *q)
17541 +{
17542 +	return q->limits.cluster;
17543 +}
17544 +
17545  /*
17546   * We regard a request as sync, if either a read or a sync write
17547   */
17548 @@ -851,7 +854,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
17549  extern void blk_queue_max_discard_sectors(struct request_queue *q,
17550  		unsigned int max_discard_sectors);
17551  extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
17552 -extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
17553 +extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
17554  extern void blk_queue_alignment_offset(struct request_queue *q,
17555  				       unsigned int alignment);
17556  extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
17557 @@ -1004,7 +1007,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
17558  	return q->limits.physical_block_size;
17559  }
17560  
17561 -static inline int bdev_physical_block_size(struct block_device *bdev)
17562 +static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
17563  {
17564  	return queue_physical_block_size(bdev_get_queue(bdev));
17565  }
17566 diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
17567 index 266ab92..499dfe9 100644
17568 --- a/include/linux/bootmem.h
17569 +++ b/include/linux/bootmem.h
17570 @@ -105,6 +105,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
17571  
17572  #define alloc_bootmem(x) \
17573  	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
17574 +#define alloc_bootmem_align(x, align) \
17575 +	__alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
17576  #define alloc_bootmem_nopanic(x) \
17577  	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
17578  #define alloc_bootmem_pages(x) \
17579 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
17580 index 975609c..81483c2 100644
17581 --- a/include/linux/gfp.h
17582 +++ b/include/linux/gfp.h
17583 @@ -339,7 +339,7 @@ void drain_local_pages(void *dummy);
17584  
17585  extern gfp_t gfp_allowed_mask;
17586  
17587 -extern void set_gfp_allowed_mask(gfp_t mask);
17588 -extern gfp_t clear_gfp_allowed_mask(gfp_t mask);
17589 +extern void pm_restrict_gfp_mask(void);
17590 +extern void pm_restore_gfp_mask(void);
17591  
17592  #endif /* __LINUX_GFP_H */
17593 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
17594 index 76da541..0315d04 100644
17595 --- a/include/linux/hrtimer.h
17596 +++ b/include/linux/hrtimer.h
17597 @@ -378,6 +378,8 @@ extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info);
17598  extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info,
17599  			struct hrtimer *timer, ktime_t time,
17600  			const enum hrtimer_mode mode);
17601 +extern int hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
17602 +			       struct hrtimer_start_on_info *info);
17603  #endif
17604  
17605  extern int hrtimer_cancel(struct hrtimer *timer);
17606 diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
17607 index 97b2eae..731854c 100644
17608 --- a/include/linux/ieee80211.h
17609 +++ b/include/linux/ieee80211.h
17610 @@ -959,7 +959,7 @@ struct ieee80211_ht_info {
17611  /* block-ack parameters */
17612  #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
17613  #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
17614 -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
17615 +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
17616  #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
17617  #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
17618  
17619 diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
17620 index 62dbee5..c238ad2 100644
17621 --- a/include/linux/kfifo.h
17622 +++ b/include/linux/kfifo.h
17623 @@ -171,11 +171,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
17624  	}
17625  
17626  
17627 -static inline unsigned int __must_check
17628 -__kfifo_must_check_helper(unsigned int val)
17629 -{
17630 -	return val;
17631 -}
17632 +/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */
17633 +#define __kfifo_must_check_helper(x) (x)
17634  
17635  /**
17636   * kfifo_initialized - Check if the fifo is initialized
17637 diff --git a/include/linux/klist.h b/include/linux/klist.h
17638 index e91a4e5..a370ce5 100644
17639 --- a/include/linux/klist.h
17640 +++ b/include/linux/klist.h
17641 @@ -22,7 +22,7 @@ struct klist {
17642  	struct list_head	k_list;
17643  	void			(*get)(struct klist_node *);
17644  	void			(*put)(struct klist_node *);
17645 -} __attribute__ ((aligned (4)));
17646 +} __attribute__ ((aligned (sizeof(void *))));
17647  
17648  #define KLIST_INIT(_name, _get, _put)					\
17649  	{ .k_lock	= __SPIN_LOCK_UNLOCKED(_name.k_lock),		\
17650 diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
17651 index 5c51f36..add8a1b 100644
17652 --- a/include/linux/mfd/wm8994/pdata.h
17653 +++ b/include/linux/mfd/wm8994/pdata.h
17654 @@ -29,7 +29,7 @@ struct wm8994_ldo_pdata {
17655  #define WM8994_CONFIGURE_GPIO 0x8000
17656  
17657  #define WM8994_DRC_REGS 5
17658 -#define WM8994_EQ_REGS  19
17659 +#define WM8994_EQ_REGS  20
17660  
17661  /**
17662   * DRC configurations are specified with a label and a set of register
17663 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
17664 index 3984c4e..8d789d7 100644
17665 --- a/include/linux/mmzone.h
17666 +++ b/include/linux/mmzone.h
17667 @@ -448,12 +448,6 @@ static inline int zone_is_oom_locked(const struct zone *zone)
17668  	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
17669  }
17670  
17671 -#ifdef CONFIG_SMP
17672 -unsigned long zone_nr_free_pages(struct zone *zone);
17673 -#else
17674 -#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
17675 -#endif /* CONFIG_SMP */
17676 -
17677  /*
17678   * The "priority" of VM scanning is how much of the queues we will scan in one
17679   * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
17680 @@ -651,7 +645,9 @@ typedef struct pglist_data {
17681  extern struct mutex zonelists_mutex;
17682  void build_all_zonelists(void *data);
17683  void wakeup_kswapd(struct zone *zone, int order);
17684 -int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
17685 +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
17686 +		int classzone_idx, int alloc_flags);
17687 +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
17688  		int classzone_idx, int alloc_flags);
17689  enum memmap_context {
17690  	MEMMAP_EARLY,
17691 diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
17692 index 89341c3..03317c8 100644
17693 --- a/include/linux/netfilter.h
17694 +++ b/include/linux/netfilter.h
17695 @@ -215,7 +215,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
17696  	int ret;
17697  
17698  	if (!cond ||
17699 -	    (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1))
17700 +	    ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
17701  		ret = okfn(skb);
17702  	return ret;
17703  }
17704 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
17705 index 570fdde..db8aa93 100644
17706 --- a/include/linux/pci_ids.h
17707 +++ b/include/linux/pci_ids.h
17708 @@ -2041,6 +2041,7 @@
17709  #define PCI_DEVICE_ID_AFAVLAB_P030	0x2182
17710  #define PCI_SUBDEVICE_ID_AFAVLAB_P061		0x2150
17711  
17712 +#define PCI_VENDOR_ID_BCM_GVC          0x14a4
17713  #define PCI_VENDOR_ID_BROADCOM		0x14e4
17714  #define PCI_DEVICE_ID_TIGON3_5752	0x1600
17715  #define PCI_DEVICE_ID_TIGON3_5752M	0x1601
17716 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
17717 index 716f99b..1d42c6e 100644
17718 --- a/include/linux/perf_event.h
17719 +++ b/include/linux/perf_event.h
17720 @@ -788,6 +788,7 @@ struct perf_event_context {
17721  	int				nr_active;
17722  	int				is_active;
17723  	int				nr_stat;
17724 +	int				rotate_disable;
17725  	atomic_t			refcount;
17726  	struct task_struct		*task;
17727  
17728 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
17729 index 4457969..bb27d7e 100644
17730 --- a/include/linux/pipe_fs_i.h
17731 +++ b/include/linux/pipe_fs_i.h
17732 @@ -160,5 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
17733  
17734  /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
17735  long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
17736 +struct pipe_inode_info *get_pipe_info(struct file *file);
17737  
17738  #endif
17739 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
17740 index 6e81888..5ea4b15 100644
17741 --- a/include/linux/pm_runtime.h
17742 +++ b/include/linux/pm_runtime.h
17743 @@ -67,7 +67,8 @@ static inline void device_set_run_wake(struct device *dev, bool enable)
17744  
17745  static inline bool pm_runtime_suspended(struct device *dev)
17746  {
17747 -	return dev->power.runtime_status == RPM_SUSPENDED;
17748 +	return dev->power.runtime_status == RPM_SUSPENDED
17749 +		&& !dev->power.disable_depth;
17750  }
17751  
17752  #else /* !CONFIG_PM_RUNTIME */
17753 diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
17754 index 634b8e6..9f38fe5 100644
17755 --- a/include/linux/radix-tree.h
17756 +++ b/include/linux/radix-tree.h
17757 @@ -36,17 +36,6 @@
17758   * RCU.
17759   */
17760  #define RADIX_TREE_INDIRECT_PTR	1
17761 -#define RADIX_TREE_RETRY ((void *)-1UL)
17762 -
17763 -static inline void *radix_tree_ptr_to_indirect(void *ptr)
17764 -{
17765 -	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
17766 -}
17767 -
17768 -static inline void *radix_tree_indirect_to_ptr(void *ptr)
17769 -{
17770 -	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
17771 -}
17772  
17773  static inline int radix_tree_is_indirect_ptr(void *ptr)
17774  {
17775 @@ -138,16 +127,29 @@ do {									\
17776   *		removed.
17777   *
17778   * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
17779 - * locked across slot lookup and dereference.  More likely, will be used with
17780 - * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
17781 + * locked across slot lookup and dereference. Not required if write lock is
17782 + * held (ie. items cannot be concurrently inserted).
17783 + *
17784 + * radix_tree_deref_retry must be used to confirm validity of the pointer if
17785 + * only the read lock is held.
17786   */
17787  static inline void *radix_tree_deref_slot(void **pslot)
17788  {
17789 -	void *ret = rcu_dereference(*pslot);
17790 -	if (unlikely(radix_tree_is_indirect_ptr(ret)))
17791 -		ret = RADIX_TREE_RETRY;
17792 -	return ret;
17793 +	return rcu_dereference(*pslot);
17794  }
17795 +
17796 +/**
17797 + * radix_tree_deref_retry	- check radix_tree_deref_slot
17798 + * @arg:	pointer returned by radix_tree_deref_slot
17799 + * Returns:	0 if retry is not required, otherwise retry is required
17800 + *
17801 + * radix_tree_deref_retry must be used with radix_tree_deref_slot.
17802 + */
17803 +static inline int radix_tree_deref_retry(void *arg)
17804 +{
17805 +	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
17806 +}
17807 +
17808  /**
17809   * radix_tree_replace_slot	- replace item in a slot
17810   * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
17811 diff --git a/include/linux/sched.h b/include/linux/sched.h
17812 index c9ac4fc..cdb9672 100644
17813 --- a/include/linux/sched.h
17814 +++ b/include/linux/sched.h
17815 @@ -147,7 +147,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
17816  extern unsigned long this_cpu_load(void);
17817  
17818  
17819 -extern void calc_global_load(void);
17820 +extern void calc_global_load(unsigned long ticks);
17821  
17822  extern unsigned long get_parent_ip(unsigned long addr);
17823  
17824 diff --git a/include/linux/socket.h b/include/linux/socket.h
17825 index a8f56e1..a2fada9 100644
17826 --- a/include/linux/socket.h
17827 +++ b/include/linux/socket.h
17828 @@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
17829  					  int offset, 
17830  					  unsigned int len, __wsum *csump);
17831  
17832 -extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
17833 +extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
17834  extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
17835  extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
17836  			     int offset, int len);
17837 diff --git a/include/linux/tty.h b/include/linux/tty.h
17838 index 67d64e6..6f62c30 100644
17839 --- a/include/linux/tty.h
17840 +++ b/include/linux/tty.h
17841 @@ -365,6 +365,7 @@ struct tty_file_private {
17842  #define TTY_HUPPED 		18	/* Post driver->hangup() */
17843  #define TTY_FLUSHING		19	/* Flushing to ldisc in progress */
17844  #define TTY_FLUSHPENDING	20	/* Queued buffer flush pending */
17845 +#define TTY_HUPPING 		21	/* ->hangup() in progress */
17846  
17847  #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
17848  
17849 diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
17850 index 55675b1..e02848c 100644
17851 --- a/include/linux/usb/serial.h
17852 +++ b/include/linux/usb/serial.h
17853 @@ -345,6 +345,9 @@ extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
17854  extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
17855  					unsigned int ch);
17856  extern int usb_serial_handle_break(struct usb_serial_port *port);
17857 +extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
17858 +					 struct tty_struct *tty,
17859 +					 unsigned int status);
17860  
17861  
17862  extern int usb_serial_bus_register(struct usb_serial_driver *device);
17863 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
17864 index eaaea37..e4cc21c 100644
17865 --- a/include/linux/vmstat.h
17866 +++ b/include/linux/vmstat.h
17867 @@ -254,6 +254,8 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
17868  extern void __dec_zone_state(struct zone *, enum zone_stat_item);
17869  
17870  void refresh_cpu_vm_stats(int);
17871 +void reduce_pgdat_percpu_threshold(pg_data_t *pgdat);
17872 +void restore_pgdat_percpu_threshold(pg_data_t *pgdat);
17873  #else /* CONFIG_SMP */
17874  
17875  /*
17876 @@ -298,6 +300,9 @@ static inline void __dec_zone_page_state(struct page *page,
17877  #define dec_zone_page_state __dec_zone_page_state
17878  #define mod_zone_page_state __mod_zone_page_state
17879  
17880 +static inline void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) { }
17881 +static inline void restore_pgdat_percpu_threshold(pg_data_t *pgdat) { }
17882 +
17883  static inline void refresh_cpu_vm_stats(int cpu) { }
17884  #endif
17885  
17886 diff --git a/include/litmus/budget.h b/include/litmus/budget.h
17887 index 732530e..130a5c5 100644
17888 --- a/include/litmus/budget.h
17889 +++ b/include/litmus/budget.h
17890 @@ -1,8 +1,44 @@
17891  #ifndef _LITMUS_BUDGET_H_
17892  #define _LITMUS_BUDGET_H_
17893  
17894 -/* Update the per-processor enforcement timer (arm/reproram/cancel) for
17895 - * the next task. */
17896 +/**
17897 + * update_enforcement_timer() - Update per-processor enforcement timer for
17898 + * the next scheduled task.
17899 + *
17900 + * If @t is not NULL and has a precisely enforced budget, the timer will be
17901 + * armed to trigger a reschedule when the budget is exhausted. Otherwise,
17902 + * the timer will be cancelled.
17903 + */
17904  void update_enforcement_timer(struct task_struct* t);
17905  
17906 +/* True if a task's server has progressed farther than the task
17907 + * itself. This happens when budget enforcement has caused a task to be
17908 + * booted off until the next period.
17909 + */
17910 +#define behind_server(t)\
17911 +	(lt_before((t)->rt_param.job_params.real_release, get_release(t)))
17912 +
17913 +/**
17914 + * server_release() - Prepare the task server parameters for the next period.
17916 + * The server for @t is what is actually executed from the scheduler's
17917 + * perspective.
17917 + */
17918 +void server_release(struct task_struct *t);
17919 +void server_release_crit_c(struct task_struct *t);
17920 +
17921 +/**
17922 + * task_release() - Prepare actual task parameters for the next period.
17923 + * The actual task parameters for @t, real_deadline and real_release, are
17925 + * the deadline and release from the task's perspective. We only record these
17925 + * so that we can write them to feather trace.
17926 + */
17927 +void task_release(struct task_struct *t);
17928 +
17929 +/* Functions for virtual timer. */
17930 +struct _rt_domain;
17931 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
17932 +extern lt_t act_to_virt(struct _rt_domain* rt, lt_t act);
17933 +extern lt_t virt_to_act(struct _rt_domain* rt, lt_t virt);
17934 +#endif
17935 +
17936  #endif
17937 diff --git a/include/litmus/ce_domain.h b/include/litmus/ce_domain.h
17938 new file mode 100644
17939 index 0000000..5d5fdf7
17940 --- /dev/null
17941 +++ b/include/litmus/ce_domain.h
17942 @@ -0,0 +1,27 @@
17943 +#ifndef _LITMUS_CE_DOMAIN_H
17944 +#define _LITMUS_CE_DOMAIN_H
17945 +
17946 +/*
17947 + * Functions that the MC plugin needs to call through a domain pointer.
17948 + */
17949 +void ce_requeue(domain_t*, struct task_struct*);
17950 +struct task_struct* ce_peek_and_take_ready(domain_t*);
17951 +int ce_higher_prio(struct task_struct*, struct task_struct*);
17952 +
17953 +#ifdef CONFIG_MERGE_TIMERS
17954 +typedef void (*ce_timer_callback_t)(struct rt_event*);
17955 +#else
17956 +typedef enum hrtimer_restart (*ce_timer_callback_t)(struct hrtimer*);
17957 +#endif
17958 +
17959 +void ce_domain_init(domain_t*,
17960 +		raw_spinlock_t*,
17961 +		requeue_t,
17962 +		peek_ready_t,
17963 +		take_ready_t,
17964 +		preempt_needed_t,
17965 +		task_prio_t,
17966 +		struct ce_dom_data*,
17967 +		const int,
17968 +		ce_timer_callback_t);
17969 +#endif
17970 diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h
17971 new file mode 100644
17972 index 0000000..0c18dcb
17973 --- /dev/null
17974 +++ b/include/litmus/clustered.h
17975 @@ -0,0 +1,44 @@
17976 +#ifndef CLUSTERED_H
17977 +#define CLUSTERED_H
17978 +
17979 +/* Which cache level should be used to group CPUs into clusters?
17980 + * GLOBAL_CLUSTER means that all CPUs form a single cluster (just like under
17981 + * global scheduling).
17982 + */
17983 +enum cache_level {
17984 +	GLOBAL_CLUSTER = 0,
17985 +	L1_CLUSTER     = 1,
17986 +	L2_CLUSTER     = 2,
17987 +	L3_CLUSTER     = 3
17988 +};
17989 +
17990 +int parse_cache_level(const char *str, enum cache_level *level);
17991 +const char* cache_level_name(enum cache_level level);
17992 +
17993 +/* expose a cache level in a /proc dir */
17994 +struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
17995 +					   enum cache_level* level);
17996 +
17997 +
17998 +
17999 +struct scheduling_cluster {
18000 +	unsigned int id;
18001 +	/* list of CPUs that are part of this cluster */
18002 +	struct list_head cpus;
18003 +};
18004 +
18005 +struct cluster_cpu {
18006 +	unsigned int id; /* which CPU is this? */
18007 +	struct list_head cluster_list; /* List of the CPUs in this cluster. */
18008 +	struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */
18009 +};
18010 +
18011 +int get_cluster_size(enum cache_level level);
18012 +
18013 +int assign_cpus_to_clusters(enum cache_level level,
18014 +			    struct scheduling_cluster* clusters[],
18015 +			    unsigned int num_clusters,
18016 +			    struct cluster_cpu* cpus[],
18017 +			    unsigned int num_cpus);
18018 +
18019 +#endif
18020 diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
18021 index 48d086d..c6c5e21 100644
18022 --- a/include/litmus/debug_trace.h
18023 +++ b/include/litmus/debug_trace.h
18024 @@ -26,7 +26,6 @@ extern atomic_t __log_seq_no;
18025  #define TRACE(fmt, args...)						\
18026  	sched_trace_log_message(TRACE_PREFIX fmt,			\
18027  				TRACE_ARGS,  ## args)
18028 -
18029  #define TRACE_TASK(t, fmt, args...)			\
18030  	TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid,	\
18031  	      (t)->rt_param.job_params.job_no,  ##args)
18032 diff --git a/include/litmus/domain.h b/include/litmus/domain.h
18033 new file mode 100644
18034 index 0000000..d16ed18
18035 --- /dev/null
18036 +++ b/include/litmus/domain.h
18037 @@ -0,0 +1,50 @@
18038 +/**
18039 + * --Todo--
18040 + * Naming: this should become rt_domain while the old rt_domain should be
18041 + * changed to sd_domain (sporadic) or pd_domain (periodic).
18042 + * task_new: need to add and use this method
18043 + */
18044 +#ifndef _LITMUS_DOMAIN_H_
18045 +#define _LITMUS_DOMAIN_H_
18046 +
18047 +struct domain;
18048 +
18049 +typedef void (*requeue_t)(struct domain*, struct task_struct*);
18050 +typedef void (*remove_t)(struct domain*, struct task_struct*);
18051 +typedef struct task_struct* (*peek_ready_t)(struct domain*);
18052 +typedef struct task_struct* (*take_ready_t)(struct domain*);
18053 +typedef int (*preempt_needed_t)(struct domain*, struct task_struct*);
18054 +typedef int (*task_prio_t)(struct task_struct*, struct task_struct*);
18055 +
18056 +typedef struct domain {
18057 +	raw_spinlock_t*		lock; /* for coarse serialization     	*/
18058 +	struct list_head	list; /* list membership              	*/
18059 +	void*			data; /* implementation-specific data	*/
18060 +	char*			name; /* for debugging 		      	*/
18061 +
18062 +	/* add a task to the domain */
18063 +	requeue_t		requeue;
18064 +	/* prevent a task from being returned by the domain */
18065 +	remove_t		remove;
18066 +	/* return next ready task */
18067 +	peek_ready_t		peek_ready;
18068 +	/* remove and return next ready task */
18069 +	take_ready_t		take_ready;
18070 +	/* return true if the domain has a task which should preempt the
18071 +	 * task given
18072 +	 */
18073 +	preempt_needed_t	preempt_needed;
18074 +	/* for tasks within this domain, returns true if the first
18075 +	 * has a higher priority than the second
18076 +	 */
18077 +	task_prio_t		higher_prio;
18078 +} domain_t;
18079 +
18080 +void domain_init(domain_t *dom,
18081 +		 raw_spinlock_t *lock,
18082 +		 requeue_t requeue,
18083 +		 peek_ready_t peek_ready,
18084 +		 take_ready_t take_ready,
18085 +		 preempt_needed_t preempt_needed,
18086 +		 task_prio_t priority);
18087 +#endif
18088 diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
18089 index 80d4321..89dca48 100644
18090 --- a/include/litmus/edf_common.h
18091 +++ b/include/litmus/edf_common.h
18092 @@ -18,10 +18,22 @@ void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
18093  int edf_higher_prio(struct task_struct* first,
18094  		    struct task_struct* second);
18095  
18096 +int gel_higher_prio(struct task_struct* first,
18097 +		    struct task_struct* second);
18098 +
18099 +lt_t get_priority_point(struct task_struct* t);
18100 +lt_t get_virt_priority_point(struct task_struct* t);
18101 +lt_t get_act_priority_point(struct task_struct* t);
18102 +
18103 +int gel_ready_order(struct bheap_node* a, struct bheap_node* b);
18104 +
18105  int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
18106  
18107  int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
18108  
18109 -int edf_set_hp_task(struct pi_semaphore *sem);
18110 -int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu);
18111 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
18112 +extern lt_t act_to_virt(rt_domain_t* rt, lt_t act);
18113 +extern lt_t virt_to_act(rt_domain_t* rt, lt_t virt);
18114 +#endif
18115 +
18116  #endif
18117 diff --git a/include/litmus/event_group.h b/include/litmus/event_group.h
18118 new file mode 100644
18119 index 0000000..b0654e0
18120 --- /dev/null
18121 +++ b/include/litmus/event_group.h
18122 @@ -0,0 +1,91 @@
18123 +#ifndef _LINUX_EVENT_QUEUE_H_
18124 +#define _LINUX_EVENT_QUEUE_H_
18125 +
18126 +#define EVENT_QUEUE_SLOTS 127 /* prime */
18127 +
18128 +#define NUM_EVENT_PRIORITIES 4 /* num crit levels really */
18129 +
18130 +struct rt_event;
18131 +typedef void (*fire_event_t)(struct rt_event *e);
18132 +
18133 +struct event_group {
18134 +	lt_t 			res;
18135 +	int 			cpu;
18136 +	struct list_head 	event_queue[EVENT_QUEUE_SLOTS];
18137 +	raw_spinlock_t 		queue_lock;
18138 +};
18139 +
18140 +/**
18141 + * A group of actions to fire at a given time
18142 + */
18143 +struct event_list {
18144 +	/* Use multiple list heads so that inserts are O(1) */
18145 +	struct list_head events[NUM_EVENT_PRIORITIES];
18146 +
18147 +	/* For timer firing */
18148 +	lt_t 				fire_time;
18149 +	struct hrtimer 			timer;
18150 +	struct hrtimer_start_on_info 	info;
18151 +
18152 +	struct list_head    queue_node;  /* For event_queue */
18153 +	struct event_group* group; /* For callback    */
18154 +};
18155 +
18156 +/**
18157 + * A single action to fire at a time
18158 + */
18159 +struct rt_event {
18160 +	/* Function to call on event expiration */
18161 +	fire_event_t 	 function;
18162 +	/* Priority of this event (lower is better) */
18163 +	int 		 prio;
18164 +
18165 +	/* For membership in the event_list */
18166 +	struct list_head 	events_node;
18167 +	/* To avoid runtime allocation. This is NOT necessarily
18168 +	 * the event_list containing this event. This is just a
18169 +	 * pre-allocated event list which can be used for merging
18170 +	 * events.
18171 +	 */
18172 +	struct event_list* 	event_list;
18173 +	/* Pointer set by add_event() so that we can cancel this event
18174 +	 * without knowing what group it is in (don't touch it).
18175 +	 */
18176 +	struct event_group*	_event_group;
18177 +};
18178 +
18179 +/**
18180 + * add_event() - Add timer to event group.
18181 + * @group	Group with which to merge event. If NULL, use the event
18182 + *		group of whatever CPU the caller is currently executing on.
18183 + * @e		Event to be fired at a specific time
18184 + * @time	Time to fire event
18185 + */
18186 +void add_event(struct event_group* group, struct rt_event* e, lt_t time);
18187 +
18188 +/**
18189 + * cancel_event() - Remove event from the group.
18190 + */
18191 +void cancel_event(struct rt_event*);
18192 +
18193 +/**
18194 + * init_event() - Create an event.
18195 + * @e		Event to create
18196 + * @prio 	Priority of the event (lower is better)
18197 + * @function	Function to fire when event expires
18198 + * @el		Pre-allocated event list for timer merging
18199 + */
18200 +void init_event(struct rt_event* e, int prio, fire_event_t function,
18201 +		struct event_list *el);
18202 +
18203 +struct event_list* event_list_alloc(int);
18204 +void event_list_free(struct event_list *el);
18205 +
18206 +/**
18207 + * get_event_group_for() - Get the event group for a CPU.
18208 + * @cpu		The CPU to get the event group for. Use NO_CPU to get the
18209 + *		event group of the CPU that the call is executing on.
18210 + */
18211 +struct event_group *get_event_group_for(const int cpu);
18212 +
18213 +#endif
18214 diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
18215 index 61f1b5b..caf2a1e 100644
18216 --- a/include/litmus/fdso.h
18217 +++ b/include/litmus/fdso.h
18218 @@ -33,17 +33,18 @@ struct inode_obj_id {
18219  	unsigned int		id;
18220  };
18221  
18222 +struct fdso_ops;
18223  
18224  struct od_table_entry {
18225  	unsigned int		used;
18226  
18227  	struct inode_obj_id*	obj;
18228 -	void*			extra;
18229 +	const struct fdso_ops*	class;
18230  };
18231  
18232  struct fdso_ops {
18233 -	void* (*create)	(void);
18234 -	void  (*destroy)(void*);
18235 +	int   (*create)(void** obj_ref, obj_type_t type, void* __user);
18236 +	void  (*destroy)(obj_type_t type, void*);
18237  	int   (*open)	(struct od_table_entry*, void* __user);
18238  	int   (*close)	(struct od_table_entry*);
18239  };
18240 @@ -51,14 +52,14 @@ struct fdso_ops {
18241  /* translate a userspace supplied od into the raw table entry
18242   * returns NULL if od is invalid
18243   */
18244 -struct od_table_entry* __od_lookup(int od);
18245 +struct od_table_entry* get_entry_for_od(int od);
18246  
18247  /* translate a userspace supplied od into the associated object
18248   * returns NULL if od is invalid
18249   */
18250  static inline void* od_lookup(int od, obj_type_t type)
18251  {
18252 -	struct od_table_entry* e = __od_lookup(od);
18253 +	struct od_table_entry* e = get_entry_for_od(od);
18254  	return e && e->obj->type == type ? e->obj->obj : NULL;
18255  }
18256  
18257 diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
18258 index 2464837..4f8353d 100644
18259 --- a/include/litmus/litmus.h
18260 +++ b/include/litmus/litmus.h
18261 @@ -26,6 +26,8 @@ static inline int in_list(struct list_head* list)
18262  		);
18263  }
18264  
18265 +struct task_struct* waitqueue_first(wait_queue_head_t *wq);
18266 +
18267  #define NO_CPU			0xffffffff
18268  
18269  void litmus_fork(struct task_struct *tsk);
18270 @@ -52,17 +54,23 @@ void litmus_exit_task(struct task_struct *tsk);
18271  #define get_partition(t) 	(tsk_rt(t)->task_params.cpu)
18272  #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
18273  #define get_release(t)		(tsk_rt(t)->job_params.release)
18274 +#define get_job_no(t)		(tsk_rt(t)->job_params.job_no)
18275  #define get_class(t)		(tsk_rt(t)->task_params.cls)
18276 +#define get_task_domain(t)	(tsk_rt(t)->_domain)
18277 +#define get_task_rdomain(t) (tsk_rt(t)->domain)
18278 +
18279 +#define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
18280 +#define get_boost_start(t)	(tsk_rt(t)->boost_start_time)
18281  
18282  inline static int budget_exhausted(struct task_struct* t)
18283  {
18284 -	return get_exec_time(t) >= get_exec_cost(t);
18285 +	return (get_exec_time(t)) >= get_exec_cost(t);
18286  }
18287  
18288  inline static lt_t budget_remaining(struct task_struct* t)
18289  {
18290  	if (!budget_exhausted(t))
18291 -		return get_exec_cost(t) - get_exec_time(t);
18292 +		return max((lt_t)0, get_exec_cost(t) - get_exec_time(t));
18293  	else
18294  		/* avoid overflow */
18295  		return 0;
18296 @@ -74,11 +82,11 @@ inline static lt_t budget_remaining(struct task_struct* t)
18297  				      == PRECISE_ENFORCEMENT)
18298  
18299  #define is_hrt(t)     		\
18300 -	(tsk_rt(t)->task_params.class == RT_CLASS_HARD)
18301 +	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
18302  #define is_srt(t)     		\
18303 -	(tsk_rt(t)->task_params.class == RT_CLASS_SOFT)
18304 +	(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
18305  #define is_be(t)      		\
18306 -	(tsk_rt(t)->task_params.class == RT_CLASS_BEST_EFFORT)
18307 +	(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)
18308  
18309  /* Our notion of time within LITMUS: kernel monotonic time. */
18310  static inline lt_t litmus_clock(void)
18311 @@ -115,7 +123,7 @@ static inline lt_t litmus_clock(void)
18312  
18313  void preempt_if_preemptable(struct task_struct* t, int on_cpu);
18314  
18315 -#ifdef CONFIG_SRP
18316 +#ifdef CONFIG_LITMUS_LOCKING
18317  void srp_ceiling_block(void);
18318  #else
18319  #define srp_ceiling_block() /* nothing */
18320 @@ -123,6 +131,16 @@ void srp_ceiling_block(void);
18321  
18322  #define bheap2task(hn) ((struct task_struct*) hn->value)
18323  
18324 +static inline struct control_page* get_control_page(struct task_struct *t)
18325 +{
18326 +	return tsk_rt(t)->ctrl_page;
18327 +}
18328 +
18329 +static inline int has_control_page(struct task_struct* t)
18330 +{
18331 +	return tsk_rt(t)->ctrl_page != NULL;
18332 +}
18333 +
18334  #ifdef CONFIG_NP_SECTION
18335  
18336  static inline int is_kernel_np(struct task_struct *t)
18337 @@ -238,4 +256,8 @@ static inline quanta_t time2quanta(lt_t time, enum round round)
18338  /* By how much is cpu staggered behind CPU 0? */
18339  u64 cpu_stagger_offset(int cpu);
18340  
18341 +#define TS_SYSCALL_IN_START						\
18342 +	if (has_control_page(current))					\
18343 +		__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start);
18344 +
18345  #endif
18346 diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h
18347 index fbc0082..6800e72 100644
18348 --- a/include/litmus/litmus_proc.h
18349 +++ b/include/litmus/litmus_proc.h
18350 @@ -17,3 +17,9 @@ long make_plugin_proc_dir(struct sched_plugin* plugin,
18351   */
18352  void remove_plugin_proc_dir(struct sched_plugin* plugin);
18353  
18354 +
18355 +/* Copy at most size-1 bytes from ubuf into kbuf, null-terminate buf, and
18356 + * remove a '\n' if present. Returns the number of bytes that were read or
18357 + * -EFAULT. */
18358 +int copy_and_chomp(char *kbuf, unsigned long ksize,
18359 +		   __user const char* ubuf, unsigned long ulength);
18360 diff --git a/include/litmus/locking.h b/include/litmus/locking.h
18361 new file mode 100644
18362 index 0000000..4d7b870
18363 --- /dev/null
18364 +++ b/include/litmus/locking.h
18365 @@ -0,0 +1,28 @@
18366 +#ifndef LITMUS_LOCKING_H
18367 +#define LITMUS_LOCKING_H
18368 +
18369 +struct litmus_lock_ops;
18370 +
18371 +/* Generic base struct for LITMUS^RT userspace semaphores.
18372 + * This structure should be embedded in protocol-specific semaphores.
18373 + */
18374 +struct litmus_lock {
18375 +	struct litmus_lock_ops *ops;
18376 +	int type;
18377 +};
18378 +
18379 +struct litmus_lock_ops {
18380 +	/* Current task tries to obtain / drop a reference to a lock.
18381 +	 * Optional methods, allowed by default. */
18382 +	int (*open)(struct litmus_lock*, void* __user);
18383 +	int (*close)(struct litmus_lock*);
18384 +
18385 +	/* Current tries to lock/unlock this lock (mandatory methods). */
18386 +	int (*lock)(struct litmus_lock*);
18387 +	int (*unlock)(struct litmus_lock*);
18388 +
18389 +	/* The lock is no longer being referenced (mandatory method). */
18390 +	void (*deallocate)(struct litmus_lock*);
18391 +};
18392 +
18393 +#endif
18394 diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
18395 index 260c6fe..29bf9bc 100644
18396 --- a/include/litmus/preempt.h
18397 +++ b/include/litmus/preempt.h
18398 @@ -8,11 +8,13 @@
18399  
18400  #include <litmus/debug_trace.h>
18401  
18402 +
18403  extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
18404  
18405  #ifdef CONFIG_DEBUG_KERNEL
18406  const char* sched_state_name(int s);
18407 -#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
18408 +//#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
18409 +#define TRACE_STATE(fmt, args...) /* ignore */
18410  #else
18411  #define TRACE_STATE(fmt, args...) /* ignore */
18412  #endif
18413 diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
18414 index ac24929..7386645 100644
18415 --- a/include/litmus/rt_domain.h
18416 +++ b/include/litmus/rt_domain.h
18417 @@ -6,14 +6,36 @@
18418  #define __UNC_RT_DOMAIN_H__
18419  
18420  #include <litmus/bheap.h>
18421 +#include <litmus/domain.h>
18422 +#include <litmus/event_group.h>
18423 +#include <litmus/sched_mc.h>
18424  
18425  #define RELEASE_QUEUE_SLOTS 127 /* prime */
18426 +#define START_JOB_NO 3
18427  
18428  struct _rt_domain;
18429  
18430  typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
18431  typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
18432  
18433 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
18434 +/**
18435 + * struct virt_timer - Virtual timer structure
18436 + * @vtimer      For release timers
18437 + * @last_act    The most recent actual time
18438 + * @last_virt   The corresponding virtual time
18439 + * @speed       The denominator of current speed of virtual time: 1/s(t)
18440 + */
18441 +struct virt_timer {
18442 +	lt_t                last_act;
18443 +    lt_t                last_virt;
18444 +	int					speed_num;
18445 +    int                 speed_denom;
18446 +	raw_spinlock_t		lock;
18447 +	struct _rt_domain*	rt;
18448 +};
18449 +#endif
18450 +
18451  struct release_queue {
18452  	/* each slot maintains a list of release heaps sorted
18453  	 * by release time */
18454 @@ -29,7 +51,10 @@ typedef struct _rt_domain {
18455  	raw_spinlock_t 			release_lock;
18456  	struct release_queue 		release_queue;
18457  
18458 -#ifdef CONFIG_RELEASE_MASTER
18459 +#if defined(CONFIG_MERGE_TIMERS)
18460 +	struct event_group*		event_group;
18461 +	int				prio;
18462 +#elif defined(CONFIG_RELEASE_MASTER)
18463  	int				release_master;
18464  #endif
18465  
18466 @@ -45,25 +70,45 @@ typedef struct _rt_domain {
18467  
18468  	/* how are tasks ordered in the ready queue? */
18469  	bheap_prio_t			order;
18470 +
18471 +	enum crit_level			level;
18472 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME    
18473 +    struct virt_timer*  virt_timer;
18474 +#endif    
18475  } rt_domain_t;
18476  
18477 +struct release_info {
18478 +    pid_t                   pid;
18479 +    unsigned int            job_no;
18480 +};
18481 +
18482  struct release_heap {
18483  	/* list_head for per-time-slot list */
18484  	struct list_head		list;
18485  	lt_t				release_time;
18486  	/* all tasks to be released at release_time */
18487  	struct bheap			heap;
18488 +
18489 +#ifdef CONFIG_MERGE_TIMERS
18490 +	/* used to merge timer calls */
18491 +	struct rt_event			event;
18492 +#else
18493  	/* used to trigger the release */
18494  	struct hrtimer			timer;
18495 +    struct hrtimer          vtimer;
18496  
18497  #ifdef CONFIG_RELEASE_MASTER
18498  	/* used to delegate releases */
18499  	struct hrtimer_start_on_info	info;
18500  #endif
18501 +#endif
18502  	/* required for the timer callback */
18503  	rt_domain_t*			dom;
18504  };
18505  
18506 +/* Extern functions for virtual timer. */
18507 +extern lt_t act_to_virt(rt_domain_t* rt, lt_t act);
18508 +extern lt_t virt_to_act(rt_domain_t* rt, lt_t virt);
18509  
18510  static inline struct task_struct* __next_ready(rt_domain_t* rt)
18511  {
18512 @@ -76,7 +121,15 @@ static inline struct task_struct* __next_ready(rt_domain_t* rt)
18513  
18514  void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
18515  		    check_resched_needed_t check,
18516 -		    release_jobs_t relase);
18517 +		    release_jobs_t release);
18518 +
18519 +void pd_domain_init(domain_t *dom,
18520 +		    rt_domain_t *rt,
18521 +		    bheap_prio_t order,
18522 +		    check_resched_needed_t check,
18523 +		    release_jobs_t release,
18524 +		    preempt_needed_t preempt_needed,
18525 +		    task_prio_t priority);
18526  
18527  void __add_ready(rt_domain_t* rt, struct task_struct *new);
18528  void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
18529 @@ -100,7 +153,7 @@ static inline struct task_struct* __peek_ready(rt_domain_t* rt)
18530  		return NULL;
18531  }
18532  
18533 -static inline int  is_queued(struct task_struct *t)
18534 +static inline int is_queued(struct task_struct *t)
18535  {
18536  	BUG_ON(!tsk_rt(t)->heap_node);
18537  	return bheap_node_in_heap(tsk_rt(t)->heap_node);
18538 diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
18539 index a7a183f..2f60be9 100644
18540 --- a/include/litmus/rt_param.h
18541 +++ b/include/litmus/rt_param.h
18542 @@ -33,6 +33,24 @@ typedef enum {
18543  	PRECISE_ENFORCEMENT  /* NOT IMPLEMENTED - enforced with hrtimers */
18544  } budget_policy_t;
18545  
18546 +/** struct report_data - report data to monitoring program
18547 + * @seq             Sequence
18548 + * @pid             Task pid
18549 + * @release_time    Release time
18550 + * @priority_point  Priority point
18551 + * @now             Current time
18552 + * @rq_empty        Level-C ready queue status
18553 + */
18554 +struct report_data {
18555 +    unsigned long long  seq;
18556 +    int                 pid;
18557 +    unsigned int        job_no;
18558 +    lt_t                release_time;
18559 +    lt_t                priority_point;
18560 +    lt_t                now;
18561 +    int                 rq_empty;
18562 +};
18563 +
18564  struct rt_task {
18565  	lt_t 		exec_cost;
18566  	lt_t 		period;
18567 @@ -40,6 +58,7 @@ struct rt_task {
18568  	unsigned int	cpu;
18569  	task_class_t	cls;
18570  	budget_policy_t budget_policy; /* ignored by pfair */
18571 +	lt_t		relative_pp; /* for GEL-v scheduler */
18572  };
18573  
18574  /* The definition of the data that is shared between the kernel and real-time
18575 @@ -63,6 +82,9 @@ struct control_page {
18576  	 * its non-preemptive section? */
18577  	int delayed_preemption;
18578  
18579 +	/* locking overhead tracing: time stamp prior to system call */
18580 +	uint64_t ts_syscall_start; /* Feather-Trace cycles */
18581 +
18582  	/* to be extended */
18583  };
18584  
18585 @@ -72,6 +94,7 @@ struct control_page {
18586  struct _rt_domain;
18587  struct bheap_node;
18588  struct release_heap;
18589 +struct domain;
18590  
18591  struct rt_job {
18592  	/* Time instant the the job was or will be released.  */
18593 @@ -79,6 +102,9 @@ struct rt_job {
18594  	/* What is the current deadline? */
18595  	lt_t   	deadline;
18596  
18597 +	lt_t	real_release;
18598 +	lt_t	real_deadline;
18599 +
18600  	/* How much service has this job received so far? */
18601  	lt_t	exec_time;
18602  
18603 @@ -90,9 +116,18 @@ struct rt_job {
18604  	 * Increase this sequence number when a job is released.
18605  	 */
18606  	unsigned int    job_no;
18607 +    
18608 +    /* virtual and actual priority point */
18609 +    unsigned int    placeholder;
18610 +    lt_t            virt_priority_point;
18611 +    lt_t            act_priority_point;
18612 +    lt_t            act_last_release;
18613  };
18614  
18615  struct pfair_param;
18616 +#ifdef CONFIG_PLUGIN_MC
18617 +struct mc_data;
18618 +#endif
18619  
18620  /*	RT task parameters for scheduling extensions
18621   *	These parameters are inherited during clone and therefore must
18622 @@ -108,6 +143,21 @@ struct rt_param {
18623  	/* is the task present? (true if it can be scheduled) */
18624  	unsigned int		present:1;
18625  
18626 +#ifdef CONFIG_LITMUS_LOCKING
18627 +	/* Is the task being priority-boosted by a locking protocol? */
18628 +	unsigned int		priority_boosted:1;
18629 +	/* If so, when did this start? */
18630 +	lt_t			boost_start_time;
18631 +#endif
18632 +
18633 +#ifdef CONFIG_PLUGIN_MC
18634 +	/* mixed criticality specific data */
18635 +	struct mc_data *mc_data;
18636 +#endif
18637 +#ifdef CONFIG_MERGE_TIMERS
18638 +	struct rt_event *event;
18639 +#endif
18640 +
18641  	/* user controlled parameters */
18642  	struct rt_task 		task_params;
18643  
18644 @@ -162,6 +212,9 @@ struct rt_param {
18645  	int old_policy;
18646  	int old_prio;
18647  
18648 +	/* TODO: rename */
18649 +	struct domain *_domain;
18650 +
18651  	/* ready queue for this task */
18652  	struct _rt_domain* domain;
18653  
18654 diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
18655 new file mode 100644
18656 index 0000000..39c7dc5
18657 --- /dev/null
18658 +++ b/include/litmus/sched_mc.h
18659 @@ -0,0 +1,152 @@
18660 +#ifndef _LINUX_SCHED_MC_H_
18661 +#define _LINUX_SCHED_MC_H_
18662 +
18663 +/* criticality levels */
18664 +enum crit_level {
18665 +	/* probably don't need to assign these (paranoid) */
18666 +	CRIT_LEVEL_A = 0,
18667 +	CRIT_LEVEL_B = 1,
18668 +	CRIT_LEVEL_C = 2,
18669 +	NUM_CRIT_LEVELS = 3,
18670 +};
18671 +
18672 +struct mc_task {
18673 +	enum crit_level crit;
18674 +	int lvl_a_id;
18675 +	int lvl_a_eligible;
18676 +};
18677 +
18678 +struct mc_job {
18679 +	int is_ghost:1;
18680 +	lt_t ghost_budget;
18681 +};
18682 +
18683 +#ifdef __KERNEL__
18684 +/*
18685 + * These are used only in the kernel. Userspace programs like RTSpin won't see
18686 + * them.
18687 + */
18688 +struct mc_data {
18689 +	struct mc_task mc_task;
18690 +	struct mc_job mc_job;
18691 +};
18692 +
18693 +#define tsk_mc_data(t)	 (tsk_rt(t)->mc_data)
18694 +#define tsk_mc_crit(t)	 (tsk_mc_data(t)->mc_task.crit)
18695 +#define is_ghost(t)	 (tsk_mc_data(t)->mc_job.is_ghost)
18696 +
18697 +#define TS "(%s/%d:%d:%s)"
18698 +#define TA(t) (t) ? tsk_mc_data(t) ? is_ghost(t) ? "ghost" : t->comm \
18699 +						 : t->comm : "NULL", \
18700 +	      (t) ? t->pid : 1,					\
18701 +	      (t) ? t->rt_param.job_params.job_no : 1,		\
18702 +	      (t && get_task_domain(t)) ? get_task_domain(t)->name : ""
18703 +
18704 +#define STRACE(fmt, args...) \
18705 +	sched_trace_log_message("%d P%d      [%s@%s:%d]: " fmt,	\
18706 +				TRACE_ARGS,  ## args)
18707 +/*
18708 +#define STRACE(fmt, args...) \
18709 +	printk(KERN_DEBUG "P%d : " fmt,	\
18710 +				raw_smp_processor_id(),  ## args)
18711 +*/
18712 +#define TRACE_MC_TASK(t, fmt, args...)				\
18713 +	STRACE(TS " " fmt, TA(t), ##args)
18714 +
18715 +/*
18716 + * The MC-CE scheduler uses this as domain data.
18717 + */
18718 +struct ce_dom_data {
18719 +	int cpu;
18720 +	struct task_struct *scheduled, *should_schedule;
18721 +#ifdef CONFIG_MERGE_TIMERS
18722 +	struct rt_event event;
18723 +#else
18724 +	struct hrtimer_start_on_info timer_info;
18725 +	struct hrtimer timer;
18726 +#endif
18727 +};
18728 +
18729 +/**
18730 + * enum crit_state - Logically add / remove CPUs from criticality levels.
18731 + *
18732 + * Global crit levels need to use a two step process to remove CPUs so
18733 + * that the CPUs can be removed without holding domain locks.
18734 + *
18735 + * @CS_ACTIVE	The criticality entry can run a task
18736 + * @CS_ACTIVATE The criticality entry can run a task, but hasn't had its
18737 + *		position updated in a global heap. Set with ONLY CPU lock.
18738 + * @CS_REMOVE   The criticality entry is logically removed, but hasn't had its
18739 + *		position adjusted in a global heap. This should be set when
18740 + *		ONLY the CPU state is locked.
18741 + * @CS_REMOVED	The criticality entry has been removed from the crit level
18742 + */
18743 +enum crit_state { CS_ACTIVE, CS_ACTIVATE, CS_REMOVE, CS_REMOVED };
18744 +
18745 +/**
18746 + * struct crit_entry - State of a CPU within each criticality level system.
18747 + * @level	Criticality level of this entry
18748 + * @linked	Logically running task, ghost or regular
18749 + * @domain	Domain from which to draw tasks
18750 + * @usable	False if a higher criticality task is running
18751 + * @event	For ghost task budget enforcement (merge timers)
18752 + * @timer	For ghost task budget enforcement (not merge timers)
18753 + * @node	Used to sort crit_entries by preemptability in global domains
18754 + */
18755 +struct crit_entry {
18756 +	enum crit_level		level;
18757 +	struct task_struct*	linked;
18758 +	struct domain*		domain;
18759 +	enum crit_state		state;
18760 +#ifdef CONFIG_MERGE_TIMERS
18761 +	struct rt_event		event;
18762 +#else
18763 +	struct hrtimer		timer;
18764 +#endif
18765 +	struct bheap_node*	node;
18766 +};
18767 +
18768 +/**
18769 + * struct domain_data - Wrap domains with related CPU state
18770 + * @domain	A domain for a criticality level
18771 + * @heap	The preemptable heap of crit entries (for global domains)
18772 + * @crit_entry	The crit entry for this domain (for partitioned domains)
18773 + */
18774 +struct domain_data {
18775 +	struct domain 		domain;
18776 +	struct bheap*		heap;
18777 +	struct crit_entry*	crit_entry;
18778 +};
18779 +
18780 +/*
18781 + * Functions that are used with the MC-CE plugin.
18782 + */
18783 +long mc_ce_set_domains(const int, struct domain_data*[]);
18784 +unsigned int mc_ce_get_expected_job(const int, const int);
18785 +
18786 +/*
18787 + * These functions are (lazily) inserted into the MC plugin code so that it
18788 + * manipulates the MC-CE state.
18789 + */
18790 +long mc_ce_admit_task_common(struct task_struct*);
18791 +void mc_ce_task_exit_common(struct task_struct*);
18792 +lt_t mc_ce_timer_callback_common(domain_t*);
18793 +void mc_ce_release_at_common(struct task_struct*, lt_t);
18794 +long mc_ce_activate_plugin_common(void);
18795 +long mc_ce_deactivate_plugin_common(void);
18796 +
18797 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
18798 +/* Functions for virtual timer. */
18799 +struct _rt_domain;
18800 +lt_t act_to_virt(struct _rt_domain* rt, lt_t act);
18801 +lt_t virt_to_act(struct _rt_domain* rt, lt_t virt);
18802 +void mc_release_crit_c_at(struct task_struct *t, lt_t start);
18803 +void mc_change_speed(int num, int denom);
18804 +void mc_set_monitor_pid(pid_t pid);
18805 +void send_signal_to_monitor(void);
18806 +void send_release_signal_to_monitor(void);
18807 +#endif
18808 +
18809 +#endif /* __KERNEL__ */
18810 +
18811 +#endif
18812 diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
18813 index 2d856d5..32c2397 100644
18814 --- a/include/litmus/sched_plugin.h
18815 +++ b/include/litmus/sched_plugin.h
18816 @@ -7,19 +7,11 @@
18817  
18818  #include <linux/sched.h>
18819  
18820 -/* struct for semaphore with priority inheritance */
18821 -struct pi_semaphore {
18822 -	atomic_t count;
18823 -	int sleepers;
18824 -	wait_queue_head_t wait;
18825 -	struct {
18826 -		/* highest-prio holder/waiter */
18827 -		struct task_struct *task;
18828 -		struct task_struct* cpu_task[NR_CPUS];
18829 -	} hp;
18830 -	/* current lock holder */
18831 -	struct task_struct *holder;
18832 -};
18833 +#ifdef CONFIG_LITMUS_LOCKING
18834 +#include <litmus/locking.h>
18835 +#endif
18836 +
18837 +struct litmus_lock;
18838  
18839  /************************ setup/tear down ********************/
18840  
18841 @@ -63,24 +55,10 @@ typedef void (*task_block_t)  (struct task_struct *task);
18842   */
18843  typedef void (*task_exit_t)    (struct task_struct *);
18844  
18845 -/* Called when the new_owner is released from the wait queue
18846 - * it should now inherit the priority from sem, _before_ it gets readded
18847 - * to any queue
18848 - */
18849 -typedef long (*inherit_priority_t) (struct pi_semaphore *sem,
18850 -				    struct task_struct *new_owner);
18851 -
18852 -/* Called when the current task releases a semahpore where it might have
18853 - * inherited a piority from
18854 - */
18855 -typedef long (*return_priority_t) (struct pi_semaphore *sem);
18856 -
18857 -/* Called when a task tries to acquire a semaphore and fails. Check if its
18858 - * priority is higher than that of the current holder.
18859 - */
18860 -typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t);
18861 -
18862 -
18863 +/* Called when the current task attempts to create a new lock of a given
18864 + * protocol type. */
18865 +typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
18866 +				 void* __user config);
18867  
18868  
18869  /********************* sys call backends  ********************/
18870 @@ -100,10 +78,6 @@ struct sched_plugin {
18871  	activate_plugin_t	activate_plugin;
18872  	deactivate_plugin_t	deactivate_plugin;
18873  
18874 -#ifdef CONFIG_SRP
18875 -	unsigned int		srp_active;
18876 -#endif
18877 -
18878  	/* 	scheduler invocation 	*/
18879  	scheduler_tick_t        tick;
18880  	schedule_t 		schedule;
18881 @@ -121,12 +95,9 @@ struct sched_plugin {
18882  	task_block_t		task_block;
18883  	task_exit_t 		task_exit;
18884  
18885 -#ifdef CONFIG_FMLP
18886 -	/*     priority inheritance 	*/
18887 -	unsigned int		fmlp_active;
18888 -	inherit_priority_t	inherit_priority;
18889 -	return_priority_t	return_priority;
18890 -	pi_block_t		pi_block;
18891 +#ifdef CONFIG_LITMUS_LOCKING
18892 +	/*	locking protocols	*/
18893 +	allocate_lock_t		allocate_lock;
18894  #endif
18895  } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
18896  
18897 @@ -137,23 +108,6 @@ int register_sched_plugin(struct sched_plugin* plugin);
18898  struct sched_plugin* find_sched_plugin(const char* name);
18899  int print_sched_plugins(char* buf, int max);
18900  
18901 -static inline int srp_active(void)
18902 -{
18903 -#ifdef CONFIG_SRP
18904 -	return litmus->srp_active;
18905 -#else
18906 -	return 0;
18907 -#endif
18908 -}
18909 -static inline int fmlp_active(void)
18910 -{
18911 -#ifdef CONFIG_FMLP
18912 -	return litmus->fmlp_active;
18913 -#else
18914 -	return 0;
18915 -#endif
18916 -}
18917 -
18918  extern struct sched_plugin linux_sched_plugin;
18919  
18920  #endif
18921 diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
18922 index a5f7373..5cc05be 100644
18923 --- a/include/litmus/sched_trace.h
18924 +++ b/include/litmus/sched_trace.h
18925 @@ -23,7 +23,9 @@ struct st_param_data {		/* regular params */
18926  	u32	period;
18927  	u32	phase;
18928  	u8	partition;
18929 -	u8	__unused[3];
18930 +	u8	class;
18931 +	u8	level;
18932 +	u8	__unused[1];
18933  };
18934  
18935  struct st_release_data {	/* A job is was/is going to be released. */
18936 @@ -34,12 +36,13 @@ struct st_release_data {	/* A job is was/is going to be released. */
18937  struct st_assigned_data {	/* A job was asigned to a CPU. 		 */
18938  	u64	when;
18939  	u8	target;		/* Where should it execute?	         */
18940 -	u8	__unused[3];
18941 +	u8	__unused[7];
18942  };
18943  
18944  struct st_switch_to_data {	/* A process was switched to on a given CPU.   */
18945  	u64	when;		/* When did this occur?                        */
18946  	u32	exec_time;	/* Time the current job has executed.          */
18947 +	u8	__unused[4];
18948  
18949  };
18950  
18951 @@ -54,7 +57,7 @@ struct st_completion_data {	/* A job completed. */
18952  				 * next task automatically; set to 0 otherwise.
18953  				 */
18954  	u8	__uflags:7;
18955 -	u8	__unused[3];
18956 +	u8	__unused[7];
18957  };
18958  
18959  struct st_block_data {		/* A task blocks. */
18960 @@ -67,6 +70,12 @@ struct st_resume_data {		/* A task resumes. */
18961  	u64	__unused;
18962  };
18963  
18964 +struct st_action_data {
18965 +	u64	when;
18966 +	u32	action;
18967 +	u8	__unused[4];
18968 +};
18969 +
18970  struct st_sys_release_data {
18971  	u64	when;
18972  	u64	release;
18973 @@ -85,7 +94,8 @@ typedef enum {
18974  	ST_COMPLETION,
18975  	ST_BLOCK,
18976  	ST_RESUME,
18977 -	ST_SYS_RELEASE,
18978 +	ST_ACTION,
18979 +	ST_SYS_RELEASE
18980  } st_event_record_type_t;
18981  
18982  struct st_event_record {
18983 @@ -102,8 +112,8 @@ struct st_event_record {
18984  		DATA(completion);
18985  		DATA(block);
18986  		DATA(resume);
18987 +		DATA(action);
18988  		DATA(sys_release);
18989 -
18990  	} data;
18991  };
18992  
18993 @@ -140,8 +150,12 @@ feather_callback void do_sched_trace_task_block(unsigned long id,
18994  						struct task_struct* task);
18995  feather_callback void do_sched_trace_task_resume(unsigned long id,
18996  						 struct task_struct* task);
18997 +feather_callback void do_sched_trace_action(unsigned long id,
18998 +					    struct task_struct* task,
18999 +					    unsigned long action);
19000  feather_callback void do_sched_trace_sys_release(unsigned long id,
19001  						 lt_t* start);
19002 +
19003  #endif
19004  
19005  #else
19006 @@ -172,9 +186,19 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
19007  	SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
19008  #define sched_trace_task_resume(t) \
19009  	SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
19010 +#define sched_trace_action(t, action) \
19011 +	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \
19012 +		     (unsigned long) action);
19013  /* when is a pointer, it does not need an explicit cast to unsigned long */
19014  #define sched_trace_sys_release(when) \
19015 -	SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when)
19016 +	SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
19017 +
19018 +
19019 +#define QT_START lt_t _qt_start = litmus_clock()
19020 +#define QT_END \
19021 +	sched_trace_log_message("%d P%d      [%s@%s:%d]: Took %llu\n\n", \
19022 +		TRACE_ARGS, litmus_clock() - _qt_start)
19023 +
19024  
19025  #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
19026  
19027 diff --git a/include/litmus/srp.h b/include/litmus/srp.h
19028 new file mode 100644
19029 index 0000000..c9a4552
19030 --- /dev/null
19031 +++ b/include/litmus/srp.h
19032 @@ -0,0 +1,28 @@
19033 +#ifndef LITMUS_SRP_H
19034 +#define LITMUS_SRP_H
19035 +
19036 +struct srp_semaphore;
19037 +
19038 +struct srp_priority {
19039 +	struct list_head	list;
19040 +        unsigned int 		priority;
19041 +	pid_t			pid;
19042 +};
19043 +#define list2prio(l) list_entry(l, struct srp_priority, list)
19044 +
19045 +/* struct for uniprocessor SRP "semaphore" */
19046 +struct srp_semaphore {
19047 +	struct litmus_lock litmus_lock;
19048 +	struct srp_priority ceiling;
19049 +	struct task_struct* owner;
19050 +	int cpu; /* cpu associated with this "semaphore" and resource */
19051 +};
19052 +
19053 +/* map a task to its SRP preemption level priority */
19054 +typedef unsigned int (*srp_prioritization_t)(struct task_struct* t);
19055 +/* Must be updated by each plugin that uses SRP.*/
19056 +extern srp_prioritization_t get_srp_prio;
19057 +
19058 +struct srp_semaphore* allocate_srp_semaphore(void);
19059 +
19060 +#endif
19061 diff --git a/include/litmus/trace.h b/include/litmus/trace.h
19062 index b32c711..c1bbf16 100644
19063 --- a/include/litmus/trace.h
19064 +++ b/include/litmus/trace.h
19065 @@ -12,7 +12,10 @@
19066  enum task_type_marker {
19067  	TSK_BE,
19068  	TSK_RT,
19069 -	TSK_UNKNOWN
19070 +	TSK_UNKNOWN,
19071 +	TSK_LVLA,
19072 +	TSK_LVLB,
19073 +	TSK_LVLC
19074  };
19075  
19076  struct timestamp {
19077 @@ -28,7 +31,8 @@ feather_callback void save_timestamp(unsigned long event);
19078  feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
19079  feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
19080  feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
19081 -
19082 +feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr);
19083 +feather_callback void save_timestamp_time(unsigned long event, unsigned long time_ptr);
19084  
19085  #define TIMESTAMP(id) ft_event0(id, save_timestamp)
19086  
19087 @@ -40,6 +44,14 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
19088  #define CTIMESTAMP(id, cpu) \
19089  	ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
19090  
19091 +#define LTIMESTAMP(id, task) \
19092 +	ft_event1(id, save_task_latency, (unsigned long) task)
19093 +
19094 +#define TIMESTAMP_TIME(id, time_ptr) \
19095 +	ft_event1(id, save_timestamp_time, (unsigned long) time_ptr)
19096 +
19097 +#define TIMESTAMP_PID(id) ft_event0(id, save_timestamp_pid)
19098 +
19099  #else /* !CONFIG_SCHED_OVERHEAD_TRACE */
19100  
19101  #define TIMESTAMP(id)        /* no tracing */
19102 @@ -50,6 +62,12 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
19103  
19104  #define CTIMESTAMP(id, cpu)  /* no tracing */
19105  
19106 +#define LTIMESTAMP(id, when_ptr) /* no tracing */
19107 +
19108 +#define TIMESTAMP_TIME(id, time_ptr) /* no tracing */
19109 +
19110 +#define TIMESTAMP_PID(id) /* no tracing */
19111 +
19112  #endif
19113  
19114  
19115 @@ -61,6 +79,21 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
19116   * always the next number after the start time event id.
19117   */
19118  
19119 +#define __TS_SYSCALL_IN_START(p)	TIMESTAMP_TIME(10, p)
19120 +#define TS_SYSCALL_IN_END		TIMESTAMP_PID(11)
19121 +
19122 +#define TS_SYSCALL_OUT_START		TIMESTAMP_PID(20)
19123 +#define TS_SYSCALL_OUT_END		TIMESTAMP_PID(21)
19124 +
19125 +#define TS_LOCK_START			TIMESTAMP_PID(30)
19126 +#define TS_LOCK_END			TIMESTAMP_PID(31)
19127 +
19128 +#define TS_LOCK_SUSPEND			TIMESTAMP_PID(38)
19129 +#define TS_LOCK_RESUME			TIMESTAMP_PID(39)
19130 +
19131 +#define TS_UNLOCK_START			TIMESTAMP_PID(40)
19132 +#define TS_UNLOCK_END			TIMESTAMP_PID(41)
19133 +
19134  #define TS_SCHED_START			DTIMESTAMP(100, TSK_UNKNOWN) /* we only
19135  								      * care
19136  								      * about
19137 @@ -78,6 +111,26 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
19138  #define TS_TICK_START(t)		TTIMESTAMP(110, t)
19139  #define TS_TICK_END(t) 			TTIMESTAMP(111, t)
19140  
19141 +#define TS_LVLA_RELEASE_START		DTIMESTAMP(112, TSK_RT)
19142 +#define TS_LVLA_RELEASE_END		DTIMESTAMP(113, TSK_RT)
19143 +
19144 +#define TS_LVLA_SCHED_START		DTIMESTAMP(114, TSK_UNKNOWN)
19145 +#define TS_LVLA_SCHED_END_ID		115
19146 +#define TS_LVLA_SCHED_END(t)		TTIMESTAMP(TS_LVLA_SCHED_END_ID, t)
19147 +
19148 +#define TS_LVLB_RELEASE_START		DTIMESTAMP(116, TSK_RT)
19149 +#define TS_LVLB_RELEASE_END		DTIMESTAMP(117, TSK_RT)
19150 +
19151 +#define TS_LVLB_SCHED_START		DTIMESTAMP(118, TSK_UNKNOWN)
19152 +#define TS_LVLB_SCHED_END_ID		119
19153 +#define TS_LVLB_SCHED_END(t)		TTIMESTAMP(TS_LVLB_SCHED_END_ID, t)
19154 +
19155 +#define TS_LVLC_RELEASE_START		DTIMESTAMP(120, TSK_RT)
19156 +#define TS_LVLC_RELEASE_END		DTIMESTAMP(121, TSK_RT)
19157 +
19158 +#define TS_LVLC_SCHED_START		DTIMESTAMP(122, TSK_UNKNOWN)
19159 +#define TS_LVLC_SCHED_END_ID		123
19160 +#define TS_LVLC_SCHED_END(t)		TTIMESTAMP(TS_LVLC_SCHED_END_ID, t)
19161  
19162  #define TS_PLUGIN_SCHED_START		/* TIMESTAMP(120) */  /* currently unused */
19163  #define TS_PLUGIN_SCHED_END		/* TIMESTAMP(121) */
19164 @@ -91,23 +144,9 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
19165  #define TS_EXIT_NP_START		TIMESTAMP(150)
19166  #define TS_EXIT_NP_END			TIMESTAMP(151)
19167  
19168 -#define TS_SRP_UP_START			TIMESTAMP(160)
19169 -#define TS_SRP_UP_END			TIMESTAMP(161)
19170 -#define TS_SRP_DOWN_START		TIMESTAMP(162)
19171 -#define TS_SRP_DOWN_END			TIMESTAMP(163)
19172 -
19173 -#define TS_PI_UP_START			TIMESTAMP(170)
19174 -#define TS_PI_UP_END			TIMESTAMP(171)
19175 -#define TS_PI_DOWN_START		TIMESTAMP(172)
19176 -#define TS_PI_DOWN_END			TIMESTAMP(173)
19177 -
19178 -#define TS_FIFO_UP_START		TIMESTAMP(180)
19179 -#define TS_FIFO_UP_END			TIMESTAMP(181)
19180 -#define TS_FIFO_DOWN_START		TIMESTAMP(182)
19181 -#define TS_FIFO_DOWN_END		TIMESTAMP(183)
19182 -
19183  #define TS_SEND_RESCHED_START(c)	CTIMESTAMP(190, c)
19184  #define TS_SEND_RESCHED_END		DTIMESTAMP(191, TSK_UNKNOWN)
19185  
19186 +#define TS_RELEASE_LATENCY(when)	LTIMESTAMP(208, &(when))
19187  
19188  #endif /* !_SYS_TRACE_H_ */
19189 diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
19190 index dbddc65..18bf00d 100644
19191 --- a/include/litmus/unistd_32.h
19192 +++ b/include/litmus/unistd_32.h
19193 @@ -10,14 +10,16 @@
19194  #define __NR_complete_job	__LSC(2)
19195  #define __NR_od_open		__LSC(3)
19196  #define __NR_od_close		__LSC(4)
19197 -#define __NR_fmlp_down		__LSC(5)
19198 -#define __NR_fmlp_up		__LSC(6)
19199 -#define __NR_srp_down		__LSC(7)
19200 -#define __NR_srp_up		__LSC(8)
19201 -#define __NR_query_job_no	__LSC(9)
19202 -#define __NR_wait_for_job_release __LSC(10)
19203 -#define __NR_wait_for_ts_release __LSC(11)
19204 -#define __NR_release_ts		__LSC(12)
19205 -#define __NR_null_call		__LSC(13)
19206 +#define __NR_litmus_lock       	__LSC(5)
19207 +#define __NR_litmus_unlock	__LSC(6)
19208 +#define __NR_query_job_no	__LSC(7)
19209 +#define __NR_wait_for_job_release __LSC(8)
19210 +#define __NR_wait_for_ts_release __LSC(9)
19211 +#define __NR_release_ts		__LSC(10)
19212 +#define __NR_null_call		__LSC(11)
19213 +#define __NR_set_rt_task_mc_param __LSC(12)
19214 +#define __NR_change_speed   __LSC(13)
19215 +#define __NR_register_pid   __LSC(14)
19216 +#define __NR_get_job_report __LSC(15)
19217  
19218 -#define NR_litmus_syscalls 14
19219 +#define NR_litmus_syscalls 16
19220 diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
19221 index f0618e7..d55f72f 100644
19222 --- a/include/litmus/unistd_64.h
19223 +++ b/include/litmus/unistd_64.h
19224 @@ -15,23 +15,27 @@ __SYSCALL(__NR_complete_job, sys_complete_job)
19225  __SYSCALL(__NR_od_open, sys_od_open)
19226  #define __NR_od_close				__LSC(4)
19227  __SYSCALL(__NR_od_close, sys_od_close)
19228 -#define __NR_fmlp_down				__LSC(5)
19229 -__SYSCALL(__NR_fmlp_down, sys_fmlp_down)
19230 -#define __NR_fmlp_up				__LSC(6)
19231 -__SYSCALL(__NR_fmlp_up, sys_fmlp_up)
19232 -#define __NR_srp_down				__LSC(7)
19233 -__SYSCALL(__NR_srp_down, sys_srp_down)
19234 -#define __NR_srp_up				__LSC(8)
19235 -__SYSCALL(__NR_srp_up, sys_srp_up)
19236 -#define __NR_query_job_no			__LSC(9)
19237 +#define __NR_litmus_lock	       		__LSC(5)
19238 +__SYSCALL(__NR_litmus_lock, sys_litmus_lock)
19239 +#define __NR_litmus_unlock	       		__LSC(6)
19240 +__SYSCALL(__NR_litmus_unlock, sys_litmus_unlock)
19241 +#define __NR_query_job_no			__LSC(7)
19242  __SYSCALL(__NR_query_job_no, sys_query_job_no)
19243 -#define __NR_wait_for_job_release		__LSC(10)
19244 +#define __NR_wait_for_job_release		__LSC(8)
19245  __SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release)
19246 -#define __NR_wait_for_ts_release		__LSC(11)
19247 +#define __NR_wait_for_ts_release		__LSC(9)
19248  __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
19249 -#define __NR_release_ts				__LSC(12)
19250 +#define __NR_release_ts				__LSC(10)
19251  __SYSCALL(__NR_release_ts, sys_release_ts)
19252 -#define __NR_null_call				__LSC(13)
19253 +#define __NR_null_call				__LSC(11)
19254  __SYSCALL(__NR_null_call, sys_null_call)
19255 +#define __NR_set_rt_task_mc_param               __LSC(12)
19256 +__SYSCALL(__NR_set_rt_task_mc_param, sys_set_rt_task_mc_param)
19257 +#define __NR_change_speed			__LSC(13)
19258 +__SYSCALL(__NR_change_speed, sys_change_speed)
19259 +#define __NR_register_pid			__LSC(14)
19260 +__SYSCALL(__NR_register_pid, sys_register_pid)
19261 +#define __NR_get_job_report			__LSC(15)
19262 +__SYSCALL(__NR_get_job_report, sys_get_job_report)
19263  
19264 -#define NR_litmus_syscalls 14
19265 +#define NR_litmus_syscalls 16
19266 diff --git a/include/media/saa7146.h b/include/media/saa7146.h
19267 index 7a9f76e..ac7ce00 100644
19268 --- a/include/media/saa7146.h
19269 +++ b/include/media/saa7146.h
19270 @@ -161,7 +161,7 @@ extern struct list_head saa7146_devices;
19271  extern struct mutex saa7146_devices_lock;
19272  int saa7146_register_extension(struct saa7146_extension*);
19273  int saa7146_unregister_extension(struct saa7146_extension*);
19274 -struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc);
19275 +struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
19276  int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
19277  void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
19278  int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
19279 diff --git a/include/net/af_unix.h b/include/net/af_unix.h
19280 index 90c9e28..18e5c3f 100644
19281 --- a/include/net/af_unix.h
19282 +++ b/include/net/af_unix.h
19283 @@ -10,6 +10,7 @@ extern void unix_inflight(struct file *fp);
19284  extern void unix_notinflight(struct file *fp);
19285  extern void unix_gc(void);
19286  extern void wait_for_unix_gc(void);
19287 +extern struct sock *unix_get_socket(struct file *filp);
19288  
19289  #define UNIX_HASH_SIZE	256
19290  
19291 @@ -56,6 +57,7 @@ struct unix_sock {
19292  	spinlock_t		lock;
19293  	unsigned int		gc_candidate : 1;
19294  	unsigned int		gc_maybe_cycle : 1;
19295 +	unsigned char		recursion_level;
19296  	struct socket_wq	peer_wq;
19297  };
19298  #define unix_sk(__sk) ((struct unix_sock *)__sk)
19299 diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
19300 index 2fd06c6..a1662a5 100644
19301 --- a/include/net/cfg80211.h
19302 +++ b/include/net/cfg80211.h
19303 @@ -1201,13 +1201,14 @@ struct cfg80211_ops {
19304   * 	initiator is %REGDOM_SET_BY_CORE).
19305   * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
19306   *	ignore regulatory domain settings until it gets its own regulatory
19307 - *	domain via its regulatory_hint(). After its gets its own regulatory
19308 - *	domain it will only allow further regulatory domain settings to
19309 - *	further enhance compliance. For example if channel 13 and 14 are
19310 - *	disabled by this regulatory domain no user regulatory domain can
19311 - *	enable these channels at a later time. This can be used for devices
19312 - *	which do not have calibration information gauranteed for frequencies
19313 - *	or settings outside of its regulatory domain.
19314 + *	domain via its regulatory_hint() unless the regulatory hint is
19315 + *	from a country IE. After it gets its own regulatory domain it will
19316 + *	only allow further regulatory domain settings to further enhance
19317 + *	compliance. For example if channel 13 and 14 are disabled by this
19318 + *	regulatory domain no user regulatory domain can enable these channels
19319 + *	at a later time. This can be used for devices which do not have
19320 + *	calibration information guaranteed for frequencies or settings
19321 + *	outside of its regulatory domain.
19322   * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
19323   *	that passive scan flags and beaconing flags may not be lifted by
19324   *	cfg80211 due to regulatory beacon hints. For more information on beacon
19325 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
19326 index b0787a1..05aaa87 100644
19327 --- a/include/net/mac80211.h
19328 +++ b/include/net/mac80211.h
19329 @@ -315,6 +315,9 @@ struct ieee80211_bss_conf {
19330   * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
19331   * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
19332   *	frame and selects the maximum number of streams that it can use.
19333 + *
19334 + * Note: If you have to add new flags to the enumeration, then don't
19335 + *	 forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
19336   */
19337  enum mac80211_tx_control_flags {
19338  	IEEE80211_TX_CTL_REQ_TX_STATUS		= BIT(0),
19339 @@ -344,6 +347,19 @@ enum mac80211_tx_control_flags {
19340  
19341  #define IEEE80211_TX_CTL_STBC_SHIFT		23
19342  
19343 +/*
19344 + * This definition is used as a mask to clear all temporary flags, which are
19345 + * set by the tx handlers for each transmission attempt by the mac80211 stack.
19346 + */
19347 +#define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK |		      \
19348 +	IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT |    \
19349 +	IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU |	      \
19350 +	IEEE80211_TX_STAT_TX_FILTERED |	IEEE80211_TX_STAT_ACK |		      \
19351 +	IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK |	      \
19352 +	IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_PSPOLL_RESPONSE | \
19353 +	IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC |		      \
19354 +	IEEE80211_TX_CTL_STBC)
19355 +
19356  /**
19357   * enum mac80211_rate_control_flags - per-rate flags set by the
19358   *	Rate Control algorithm.
19359 diff --git a/include/net/sock.h b/include/net/sock.h
19360 index adab9dc..6892a5e 100644
19361 --- a/include/net/sock.h
19362 +++ b/include/net/sock.h
19363 @@ -1155,6 +1155,8 @@ extern void sk_common_release(struct sock *sk);
19364  /* Initialise core socket variables */
19365  extern void sock_init_data(struct socket *sock, struct sock *sk);
19366  
19367 +extern void sk_filter_release_rcu(struct rcu_head *rcu);
19368 +
19369  /**
19370   *	sk_filter_release - release a socket filter
19371   *	@fp: filter to remove
19372 @@ -1165,7 +1167,7 @@ extern void sock_init_data(struct socket *sock, struct sock *sk);
19373  static inline void sk_filter_release(struct sk_filter *fp)
19374  {
19375  	if (atomic_dec_and_test(&fp->refcnt))
19376 -		kfree(fp);
19377 +		call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
19378  }
19379  
19380  static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
19381 diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
19382 index 8fcb6e0..d38b58e 100644
19383 --- a/include/scsi/scsi.h
19384 +++ b/include/scsi/scsi.h
19385 @@ -9,6 +9,7 @@
19386  #define _SCSI_SCSI_H
19387  
19388  #include <linux/types.h>
19389 +#include <linux/scatterlist.h>
19390  
19391  struct scsi_cmnd;
19392  
19393 diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
19394 index e8cbf43..75271b9 100644
19395 --- a/include/xen/interface/io/ring.h
19396 +++ b/include/xen/interface/io/ring.h
19397 @@ -24,8 +24,15 @@ typedef unsigned int RING_IDX;
19398   * A ring contains as many entries as will fit, rounded down to the nearest
19399   * power of two (so we can mask with (size-1) to loop around).
19400   */
19401 -#define __RING_SIZE(_s, _sz) \
19402 -    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
19403 +#define __CONST_RING_SIZE(_s, _sz)				\
19404 +	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /	\
19405 +		sizeof(((struct _s##_sring *)0)->ring[0])))
19406 +
19407 +/*
19408 + * The same for passing in an actual pointer instead of a name tag.
19409 + */
19410 +#define __RING_SIZE(_s, _sz)						\
19411 +	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
19412  
19413  /*
19414   * Macros to make the correct C datatypes for a new kind of ring.
19415 diff --git a/init/calibrate.c b/init/calibrate.c
19416 index 6eb48e5..24fe022 100644
19417 --- a/init/calibrate.c
19418 +++ b/init/calibrate.c
19419 @@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
19420  		pre_start = 0;
19421  		read_current_timer(&start);
19422  		start_jiffies = jiffies;
19423 -		while (jiffies <= (start_jiffies + 1)) {
19424 +		while (time_before_eq(jiffies, start_jiffies + 1)) {
19425  			pre_start = start;
19426  			read_current_timer(&start);
19427  		}
19428 @@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
19429  
19430  		pre_end = 0;
19431  		end = post_start;
19432 -		while (jiffies <=
19433 -		       (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
19434 +		while (time_before_eq(jiffies, start_jiffies + 1 +
19435 +					       DELAY_CALIBRATION_TICKS)) {
19436  			pre_end = end;
19437  			read_current_timer(&end);
19438  		}
19439 diff --git a/ipc/compat.c b/ipc/compat.c
19440 index 9dc2c7d..845a287 100644
19441 --- a/ipc/compat.c
19442 +++ b/ipc/compat.c
19443 @@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
19444  	struct semid64_ds __user *up64;
19445  	int version = compat_ipc_parse_version(&third);
19446  
19447 +	memset(&s64, 0, sizeof(s64));
19448 +
19449  	if (!uptr)
19450  		return -EINVAL;
19451  	if (get_user(pad, (u32 __user *) uptr))
19452 @@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
19453  	int version = compat_ipc_parse_version(&second);
19454  	void __user *p;
19455  
19456 +	memset(&m64, 0, sizeof(m64));
19457 +
19458  	switch (second & (~IPC_64)) {
19459  	case IPC_INFO:
19460  	case IPC_RMID:
19461 @@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
19462  	int err, err2;
19463  	int version = compat_ipc_parse_version(&second);
19464  
19465 +	memset(&s64, 0, sizeof(s64));
19466 +
19467  	switch (second & (~IPC_64)) {
19468  	case IPC_RMID:
19469  	case SHM_LOCK:
19470 diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
19471 index d8d1e9f..380ea4f 100644
19472 --- a/ipc/compat_mq.c
19473 +++ b/ipc/compat_mq.c
19474 @@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
19475  	void __user *p = NULL;
19476  	if (u_attr && oflag & O_CREAT) {
19477  		struct mq_attr attr;
19478 +
19479 +		memset(&attr, 0, sizeof(attr));
19480 +
19481  		p = compat_alloc_user_space(sizeof(attr));
19482  		if (get_compat_mq_attr(&attr, u_attr) ||
19483  		    copy_to_user(p, &attr, sizeof(attr)))
19484 @@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
19485  	struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
19486  	long ret;
19487  
19488 +	memset(&mqstat, 0, sizeof(mqstat));
19489 +
19490  	if (u_mqstat) {
19491  		if (get_compat_mq_attr(&mqstat, u_mqstat) ||
19492  		    copy_to_user(p, &mqstat, sizeof(mqstat)))
19493 diff --git a/ipc/shm.c b/ipc/shm.c
19494 index 52ed77e..b427380 100644
19495 --- a/ipc/shm.c
19496 +++ b/ipc/shm.c
19497 @@ -473,6 +473,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
19498  	    {
19499  		struct shmid_ds out;
19500  
19501 +		memset(&out, 0, sizeof(out));
19502  		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
19503  		out.shm_segsz	= in->shm_segsz;
19504  		out.shm_atime	= in->shm_atime;
19505 diff --git a/kernel/exit.c b/kernel/exit.c
19506 index b9d3bc6..ad7e514 100644
19507 --- a/kernel/exit.c
19508 +++ b/kernel/exit.c
19509 @@ -97,6 +97,14 @@ static void __exit_signal(struct task_struct *tsk)
19510  		sig->tty = NULL;
19511  	} else {
19512  		/*
19513 +		 * This can only happen if the caller is de_thread().
19514 +		 * FIXME: this is the temporary hack, we should teach
19515 +		 * posix-cpu-timers to handle this case correctly.
19516 +		 */
19517 +		if (unlikely(has_group_leader_pid(tsk)))
19518 +			posix_cpu_timers_exit_group(tsk);
19519 +
19520 +		/*
19521  		 * If there is any task waiting for the group exit
19522  		 * then notify it:
19523  		 */
19524 @@ -905,6 +913,15 @@ NORET_TYPE void do_exit(long code)
19525  	if (unlikely(!tsk->pid))
19526  		panic("Attempted to kill the idle task!");
19527  
19528 +	/*
19529 +	 * If do_exit is called because this process oopsed, it's possible
19530 +	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
19531 +	 * continuing. Amongst other possible reasons, this is to prevent
19532 +	 * mm_release()->clear_child_tid() from writing to a user-controlled
19533 +	 * kernel address.
19534 +	 */
19535 +	set_fs(USER_DS);
19536 +
19537  	tracehook_report_exit(&code);
19538  
19539  	validate_creds_for_do_exit(tsk);
19540 diff --git a/kernel/fork.c b/kernel/fork.c
19541 index ab7f29d..d01a751 100644
19542 --- a/kernel/fork.c
19543 +++ b/kernel/fork.c
19544 @@ -279,6 +279,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
19545  
19546  	setup_thread_stack(tsk, orig);
19547  	clear_user_return_notifier(tsk);
19548 +	clear_tsk_need_resched(tsk);
19549  	stackend = end_of_stack(tsk);
19550  	*stackend = STACK_END_MAGIC;	/* for overflow detection */
19551  
19552 diff --git a/kernel/futex.c b/kernel/futex.c
19553 index 6a3a5fa..e328f57 100644
19554 --- a/kernel/futex.c
19555 +++ b/kernel/futex.c
19556 @@ -1363,7 +1363,6 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
19557  {
19558  	struct futex_hash_bucket *hb;
19559  
19560 -	get_futex_key_refs(&q->key);
19561  	hb = hash_futex(&q->key);
19562  	q->lock_ptr = &hb->lock;
19563  
19564 @@ -1375,7 +1374,6 @@ static inline void
19565  queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
19566  {
19567  	spin_unlock(&hb->lock);
19568 -	drop_futex_key_refs(&q->key);
19569  }
19570  
19571  /**
19572 @@ -1480,8 +1478,6 @@ static void unqueue_me_pi(struct futex_q *q)
19573  	q->pi_state = NULL;
19574  
19575  	spin_unlock(q->lock_ptr);
19576 -
19577 -	drop_futex_key_refs(&q->key);
19578  }
19579  
19580  /*
19581 @@ -1812,7 +1808,10 @@ static int futex_wait(u32 __user *uaddr, int fshared,
19582  	}
19583  
19584  retry:
19585 -	/* Prepare to wait on uaddr. */
19586 +	/*
19587 +	 * Prepare to wait on uaddr. On success, holds hb lock and increments
19588 +	 * q.key refs.
19589 +	 */
19590  	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
19591  	if (ret)
19592  		goto out;
19593 @@ -1822,24 +1821,23 @@ retry:
19594  
19595  	/* If we were woken (and unqueued), we succeeded, whatever. */
19596  	ret = 0;
19597 +	/* unqueue_me() drops q.key ref */
19598  	if (!unqueue_me(&q))
19599 -		goto out_put_key;
19600 +		goto out;
19601  	ret = -ETIMEDOUT;
19602  	if (to && !to->task)
19603 -		goto out_put_key;
19604 +		goto out;
19605  
19606  	/*
19607  	 * We expect signal_pending(current), but we might be the
19608  	 * victim of a spurious wakeup as well.
19609  	 */
19610 -	if (!signal_pending(current)) {
19611 -		put_futex_key(fshared, &q.key);
19612 +	if (!signal_pending(current))
19613  		goto retry;
19614 -	}
19615  
19616  	ret = -ERESTARTSYS;
19617  	if (!abs_time)
19618 -		goto out_put_key;
19619 +		goto out;
19620  
19621  	restart = &current_thread_info()->restart_block;
19622  	restart->fn = futex_wait_restart;
19623 @@ -1856,8 +1854,6 @@ retry:
19624  
19625  	ret = -ERESTART_RESTARTBLOCK;
19626  
19627 -out_put_key:
19628 -	put_futex_key(fshared, &q.key);
19629  out:
19630  	if (to) {
19631  		hrtimer_cancel(&to->timer);
19632 @@ -2236,7 +2232,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
19633  	q.rt_waiter = &rt_waiter;
19634  	q.requeue_pi_key = &key2;
19635  
19636 -	/* Prepare to wait on uaddr. */
19637 +	/*
19638 +	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
19639 +	 * count.
19640 +	 */
19641  	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
19642  	if (ret)
19643  		goto out_key2;
19644 @@ -2254,7 +2253,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
19645  	 * In order for us to be here, we know our q.key == key2, and since
19646  	 * we took the hb->lock above, we also know that futex_requeue() has
19647  	 * completed and we no longer have to concern ourselves with a wakeup
19648 -	 * race with the atomic proxy lock acquition by the requeue code.
19649 +	 * race with the atomic proxy lock acquisition by the requeue code. The
19650 +	 * futex_requeue dropped our key1 reference and incremented our key2
19651 +	 * reference count.
19652  	 */
19653  
19654  	/* Check if the requeue code acquired the second futex for us. */
19655 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
19656 index cb49883..d6a15dd 100644
19657 --- a/kernel/hrtimer.c
19658 +++ b/kernel/hrtimer.c
19659 @@ -1053,6 +1053,7 @@ void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info)
19660  {
19661  	memset(info, 0, sizeof(struct hrtimer_start_on_info));
19662  	atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
19663 +	INIT_LIST_HEAD(&info->list);
19664  }
19665  
19666  /**
19667 @@ -1071,12 +1072,32 @@ void hrtimer_pull(void)
19668  	list_for_each_safe(pos, safe, &list) {
19669  		info = list_entry(pos, struct hrtimer_start_on_info, list);
19670  		TRACE("pulled timer 0x%x\n", info->timer);
19671 -		list_del(pos);
19672 -		hrtimer_start(info->timer, info->time, info->mode);
19673 +		list_del_init(pos);
19674 +		if (!info->timer) continue;
19675 +		if (atomic_read(&info->state) != HRTIMER_START_ON_INACTIVE)
19676 +			hrtimer_start(info->timer, info->time, info->mode);
19677 +		if (atomic_read(&info->state) == HRTIMER_START_ON_INACTIVE)
19678 +			hrtimer_cancel(info->timer);
19679  	}
19680  }
19681  
19682  /**
19683 + * hrtimer_pull_cancel - Cancel a remote timer pull
19684 + */
19685 +int hrtimer_pull_cancel(int cpu, struct hrtimer *timer,
19686 +			struct hrtimer_start_on_info *info)
19687 +{
19688 +	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
19689 +
19690 +	raw_spin_lock(&base->lock);
19691 +	list_del_init(&info->list);
19692 +	raw_spin_unlock(&base->lock);
19693 +
19694 +	atomic_set(&info->state, HRTIMER_START_ON_INACTIVE);
19695 +	return hrtimer_try_to_cancel(timer);
19696 +}
19697 +
19698 +/**
19699   *  hrtimer_start_on - trigger timer arming on remote cpu
19700   *  @cpu:	remote cpu
19701   *  @info:	save timer information for enqueuing on remote cpu
19702 @@ -1085,8 +1106,8 @@ void hrtimer_pull(void)
19703   *  @mode:	timer mode
19704   */
19705  int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
19706 -		struct hrtimer *timer, ktime_t time,
19707 -		const enum hrtimer_mode mode)
19708 +		     struct hrtimer *timer, ktime_t time,
19709 +		     const enum hrtimer_mode mode)
19710  {
19711  	unsigned long flags;
19712  	struct hrtimer_cpu_base* base;
19713 @@ -1118,7 +1139,8 @@ int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
19714  			__hrtimer_start_range_ns(info->timer, info->time,
19715  						 0, info->mode, 0);
19716  		} else {
19717 -			TRACE("hrtimer_start_on: pulling to remote CPU\n");
19718 +			TRACE("hrtimer_start_on: pulling 0x%x to remote CPU\n",
19719 +			      info->timer);
19720  			base = &per_cpu(hrtimer_bases, cpu);
19721  			raw_spin_lock_irqsave(&base->lock, flags);
19722  			was_empty = list_empty(&base->to_pull);
19723 diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
19724 index 09a2ee5..345e0b7 100644
19725 --- a/kernel/irq/proc.c
19726 +++ b/kernel/irq/proc.c
19727 @@ -214,7 +214,7 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v)
19728  
19729  static int irq_spurious_proc_open(struct inode *inode, struct file *file)
19730  {
19731 -	return single_open(file, irq_spurious_proc_show, NULL);
19732 +	return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
19733  }
19734  
19735  static const struct file_operations irq_spurious_proc_fops = {
19736 diff --git a/kernel/latencytop.c b/kernel/latencytop.c
19737 index 877fb30..17110a4 100644
19738 --- a/kernel/latencytop.c
19739 +++ b/kernel/latencytop.c
19740 @@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
19741  
19742  	account_global_scheduler_latency(tsk, &lat);
19743  
19744 -	/*
19745 -	 * short term hack; if we're > 32 we stop; future we recycle:
19746 -	 */
19747 -	tsk->latency_record_count++;
19748 -	if (tsk->latency_record_count >= LT_SAVECOUNT)
19749 -		goto out_unlock;
19750 -
19751 -	for (i = 0; i < LT_SAVECOUNT; i++) {
19752 +	for (i = 0; i < tsk->latency_record_count; i++) {
19753  		struct latency_record *mylat;
19754  		int same = 1;
19755  
19756 @@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
19757  		}
19758  	}
19759  
19760 +	/*
19761 +	 * short term hack; if we're > 32 we stop; future we recycle:
19762 +	 */
19763 +	if (tsk->latency_record_count >= LT_SAVECOUNT)
19764 +		goto out_unlock;
19765 +
19766  	/* Allocated a new one: */
19767 -	i = tsk->latency_record_count;
19768 +	i = tsk->latency_record_count++;
19769  	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
19770  
19771  out_unlock:
19772 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
19773 index b98bed3..65b09a8 100644
19774 --- a/kernel/perf_event.c
19775 +++ b/kernel/perf_event.c
19776 @@ -1620,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
19777  {
19778  	raw_spin_lock(&ctx->lock);
19779  
19780 -	/* Rotate the first entry last of non-pinned groups */
19781 -	list_rotate_left(&ctx->flexible_groups);
19782 +	/*
19783 +	 * Rotate the first entry last of non-pinned groups. Rotation might be
19784 +	 * disabled by the inheritance code.
19785 +	 */
19786 +	if (!ctx->rotate_disable)
19787 +		list_rotate_left(&ctx->flexible_groups);
19788  
19789  	raw_spin_unlock(&ctx->lock);
19790  }
19791 @@ -1773,7 +1777,13 @@ static u64 perf_event_read(struct perf_event *event)
19792  		unsigned long flags;
19793  
19794  		raw_spin_lock_irqsave(&ctx->lock, flags);
19795 -		update_context_time(ctx);
19796 +		/*
19797 +		 * may read while context is not active
19798 +		 * (e.g., thread is blocked), in that case
19799 +		 * we cannot update context time
19800 +		 */
19801 +		if (ctx->is_active)
19802 +			update_context_time(ctx);
19803  		update_event_times(event);
19804  		raw_spin_unlock_irqrestore(&ctx->lock, flags);
19805  	}
19806 @@ -5616,6 +5626,7 @@ int perf_event_init_task(struct task_struct *child)
19807  	struct perf_event *event;
19808  	struct task_struct *parent = current;
19809  	int inherited_all = 1;
19810 +	unsigned long flags;
19811  	int ret = 0;
19812  
19813  	child->perf_event_ctxp = NULL;
19814 @@ -5656,6 +5667,15 @@ int perf_event_init_task(struct task_struct *child)
19815  			break;
19816  	}
19817  
19818 +	/*
19819 +	 * We can't hold ctx->lock when iterating the ->flexible_group list due
19820 +	 * to allocations, but we need to prevent rotation because
19821 +	 * rotate_ctx() will change the list from interrupt context.
19822 +	 */
19823 +	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
19824 +	parent_ctx->rotate_disable = 1;
19825 +	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
19826 +
19827  	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
19828  		ret = inherit_task_group(event, parent, parent_ctx, child,
19829  					 &inherited_all);
19830 @@ -5663,6 +5683,10 @@ int perf_event_init_task(struct task_struct *child)
19831  			break;
19832  	}
19833  
19834 +	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
19835 +	parent_ctx->rotate_disable = 0;
19836 +	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
19837 +
19838  	child_ctx = child->perf_event_ctxp;
19839  
19840  	if (child_ctx && inherited_all) {
19841 diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
19842 index 645e541..0da2837 100644
19843 --- a/kernel/pm_qos_params.c
19844 +++ b/kernel/pm_qos_params.c
19845 @@ -120,10 +120,10 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
19846  
19847  	switch (o->type) {
19848  	case PM_QOS_MIN:
19849 -		return plist_last(&o->requests)->prio;
19850 +		return plist_first(&o->requests)->prio;
19851  
19852  	case PM_QOS_MAX:
19853 -		return plist_first(&o->requests)->prio;
19854 +		return plist_last(&o->requests)->prio;
19855  
19856  	default:
19857  		/* runtime check for not using enum */
19858 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
19859 index 8dc31e0..7a931a9 100644
19860 --- a/kernel/power/hibernate.c
19861 +++ b/kernel/power/hibernate.c
19862 @@ -326,7 +326,6 @@ static int create_image(int platform_mode)
19863  int hibernation_snapshot(int platform_mode)
19864  {
19865  	int error;
19866 -	gfp_t saved_mask;
19867  
19868  	error = platform_begin(platform_mode);
19869  	if (error)
19870 @@ -338,7 +337,7 @@ int hibernation_snapshot(int platform_mode)
19871  		goto Close;
19872  
19873  	suspend_console();
19874 -	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
19875 +	pm_restrict_gfp_mask();
19876  	error = dpm_suspend_start(PMSG_FREEZE);
19877  	if (error)
19878  		goto Recover_platform;
19879 @@ -347,7 +346,10 @@ int hibernation_snapshot(int platform_mode)
19880  		goto Recover_platform;
19881  
19882  	error = create_image(platform_mode);
19883 -	/* Control returns here after successful restore */
19884 +	/*
19885 +	 * Control returns here (1) after the image has been created or the
19886 +	 * image creation has failed and (2) after a successful restore.
19887 +	 */
19888  
19889   Resume_devices:
19890  	/* We may need to release the preallocated image pages here. */
19891 @@ -356,7 +358,10 @@ int hibernation_snapshot(int platform_mode)
19892  
19893  	dpm_resume_end(in_suspend ?
19894  		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
19895 -	set_gfp_allowed_mask(saved_mask);
19896 +
19897 +	if (error || !in_suspend)
19898 +		pm_restore_gfp_mask();
19899 +
19900  	resume_console();
19901   Close:
19902  	platform_end(platform_mode);
19903 @@ -451,17 +456,16 @@ static int resume_target_kernel(bool platform_mode)
19904  int hibernation_restore(int platform_mode)
19905  {
19906  	int error;
19907 -	gfp_t saved_mask;
19908  
19909  	pm_prepare_console();
19910  	suspend_console();
19911 -	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
19912 +	pm_restrict_gfp_mask();
19913  	error = dpm_suspend_start(PMSG_QUIESCE);
19914  	if (!error) {
19915  		error = resume_target_kernel(platform_mode);
19916  		dpm_resume_end(PMSG_RECOVER);
19917  	}
19918 -	set_gfp_allowed_mask(saved_mask);
19919 +	pm_restore_gfp_mask();
19920  	resume_console();
19921  	pm_restore_console();
19922  	return error;
19923 @@ -475,7 +479,6 @@ int hibernation_restore(int platform_mode)
19924  int hibernation_platform_enter(void)
19925  {
19926  	int error;
19927 -	gfp_t saved_mask;
19928  
19929  	if (!hibernation_ops)
19930  		return -ENOSYS;
19931 @@ -491,7 +494,6 @@ int hibernation_platform_enter(void)
19932  
19933  	entering_platform_hibernation = true;
19934  	suspend_console();
19935 -	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
19936  	error = dpm_suspend_start(PMSG_HIBERNATE);
19937  	if (error) {
19938  		if (hibernation_ops->recover)
19939 @@ -535,7 +537,6 @@ int hibernation_platform_enter(void)
19940   Resume_devices:
19941  	entering_platform_hibernation = false;
19942  	dpm_resume_end(PMSG_RESTORE);
19943 -	set_gfp_allowed_mask(saved_mask);
19944  	resume_console();
19945  
19946   Close:
19947 @@ -643,6 +644,7 @@ int hibernate(void)
19948  		swsusp_free();
19949  		if (!error)
19950  			power_down();
19951 +		pm_restore_gfp_mask();
19952  	} else {
19953  		pr_debug("PM: Image restored successfully.\n");
19954  	}
19955 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
19956 index 7335952..ecf7705 100644
19957 --- a/kernel/power/suspend.c
19958 +++ b/kernel/power/suspend.c
19959 @@ -197,7 +197,6 @@ static int suspend_enter(suspend_state_t state)
19960  int suspend_devices_and_enter(suspend_state_t state)
19961  {
19962  	int error;
19963 -	gfp_t saved_mask;
19964  
19965  	if (!suspend_ops)
19966  		return -ENOSYS;
19967 @@ -208,7 +207,7 @@ int suspend_devices_and_enter(suspend_state_t state)
19968  			goto Close;
19969  	}
19970  	suspend_console();
19971 -	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
19972 +	pm_restrict_gfp_mask();
19973  	suspend_test_start();
19974  	error = dpm_suspend_start(PMSG_SUSPEND);
19975  	if (error) {
19976 @@ -225,7 +224,7 @@ int suspend_devices_and_enter(suspend_state_t state)
19977  	suspend_test_start();
19978  	dpm_resume_end(PMSG_RESUME);
19979  	suspend_test_finish("resume devices");
19980 -	set_gfp_allowed_mask(saved_mask);
19981 +	pm_restore_gfp_mask();
19982  	resume_console();
19983   Close:
19984  	if (suspend_ops->end)
19985 diff --git a/kernel/power/user.c b/kernel/power/user.c
19986 index e819e17..c36c3b9 100644
19987 --- a/kernel/power/user.c
19988 +++ b/kernel/power/user.c
19989 @@ -137,7 +137,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
19990  	free_all_swap_pages(data->swap);
19991  	if (data->frozen)
19992  		thaw_processes();
19993 -	pm_notifier_call_chain(data->mode == O_WRONLY ?
19994 +	pm_notifier_call_chain(data->mode == O_RDONLY ?
19995  			PM_POST_HIBERNATION : PM_POST_RESTORE);
19996  	atomic_inc(&snapshot_device_available);
19997  
19998 @@ -263,6 +263,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
19999  	case SNAPSHOT_UNFREEZE:
20000  		if (!data->frozen || data->ready)
20001  			break;
20002 +		pm_restore_gfp_mask();
20003  		thaw_processes();
20004  		usermodehelper_enable();
20005  		data->frozen = 0;
20006 @@ -275,6 +276,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
20007  			error = -EPERM;
20008  			break;
20009  		}
20010 +		pm_restore_gfp_mask();
20011  		error = hibernation_snapshot(data->platform_support);
20012  		if (!error)
20013  			error = put_user(in_suspend, (int __user *)arg);
20014 diff --git a/kernel/printk.c b/kernel/printk.c
20015 index 9dc8ea1..2dc36b4 100644
20016 --- a/kernel/printk.c
20017 +++ b/kernel/printk.c
20018 @@ -1072,13 +1072,15 @@ void printk_tick(void)
20019  
20020  int printk_needs_cpu(int cpu)
20021  {
20022 +	if (unlikely(cpu_is_offline(cpu)))
20023 +		printk_tick();
20024  	return per_cpu(printk_pending, cpu);
20025  }
20026  
20027  void wake_up_klogd(void)
20028  {
20029  	if (!trace_override && waitqueue_active(&log_wait))
20030 -		__raw_get_cpu_var(printk_pending) = 1;
20031 +		this_cpu_write(printk_pending, 1);
20032  }
20033  
20034  /**
20035 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
20036 index f34d798..bf768d7 100644
20037 --- a/kernel/ptrace.c
20038 +++ b/kernel/ptrace.c
20039 @@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
20040  		child->exit_code = data;
20041  		dead = __ptrace_detach(current, child);
20042  		if (!child->exit_state)
20043 -			wake_up_process(child);
20044 +			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
20045  	}
20046  	write_unlock_irq(&tasklist_lock);
20047  
20048 diff --git a/kernel/sched.c b/kernel/sched.c
20049 index 1b13c8e..ebbefa4 100644
20050 --- a/kernel/sched.c
20051 +++ b/kernel/sched.c
20052 @@ -84,6 +84,12 @@
20053  
20054  static void litmus_tick(struct rq*, struct task_struct*);
20055  
20056 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
20057 +extern void send_signal_to_monitor(void);
20058 +extern void send_release_signal_to_monitor(void);
20059 +extern atomic_t pend_release_signal;
20060 +#endif
20061 +
20062  #define CREATE_TRACE_POINTS
20063  #include <trace/events/sched.h>
20064  
20065 @@ -583,7 +589,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
20066  	 * cases. LITMUS^RT amplifies the effects of this problem. Hence, we
20067  	 * turn it off to avoid stalling clocks. */
20068  	/*
20069 -	if (test_tsk_need_resched(p))
20070 +	if (rq->curr->se.on_rq && test_tsk_need_resched(p))
20071  		rq->skip_clock_update = 1;
20072  	*/
20073  }
20074 @@ -741,7 +747,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
20075  		size_t cnt, loff_t *ppos)
20076  {
20077  	char buf[64];
20078 -	char *cmp = buf;
20079 +	char *cmp;
20080  	int neg = 0;
20081  	int i;
20082  
20083 @@ -752,6 +758,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
20084  		return -EFAULT;
20085  
20086  	buf[cnt] = 0;
20087 +	cmp = strstrip(buf);
20088  
20089  	if (strncmp(buf, "NO_", 3) == 0) {
20090  		neg = 1;
20091 @@ -759,9 +766,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
20092  	}
20093  
20094  	for (i = 0; sched_feat_names[i]; i++) {
20095 -		int len = strlen(sched_feat_names[i]);
20096 -
20097 -		if (strncmp(cmp, sched_feat_names[i], len) == 0) {
20098 +		if (strcmp(cmp, sched_feat_names[i]) == 0) {
20099  			if (neg)
20100  				sysctl_sched_features &= ~(1UL << i);
20101  			else
20102 @@ -1877,12 +1882,6 @@ static void dec_nr_running(struct rq *rq)
20103  
20104  static void set_load_weight(struct task_struct *p)
20105  {
20106 -	if (task_has_rt_policy(p)) {
20107 -		p->se.load.weight = 0;
20108 -		p->se.load.inv_weight = WMULT_CONST;
20109 -		return;
20110 -	}
20111 -
20112  	/*
20113  	 * SCHED_IDLE tasks get minimal weight:
20114  	 */
20115 @@ -2557,7 +2556,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
20116  	 * Revert to default priority/policy on fork if requested.
20117  	 */
20118  	if (unlikely(p->sched_reset_on_fork)) {
20119 -		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
20120 +		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR ||
20121 +		    p->policy == SCHED_LITMUS) {
20122  			p->policy = SCHED_NORMAL;
20123  			p->normal_prio = p->static_prio;
20124  		}
20125 @@ -3010,6 +3010,15 @@ static long calc_load_fold_active(struct rq *this_rq)
20126  	return delta;
20127  }
20128  
20129 +static unsigned long
20130 +calc_load(unsigned long load, unsigned long exp, unsigned long active)
20131 +{
20132 +	load *= exp;
20133 +	load += active * (FIXED_1 - exp);
20134 +	load += 1UL << (FSHIFT - 1);
20135 +	return load >> FSHIFT;
20136 +}
20137 +
20138  #ifdef CONFIG_NO_HZ
20139  /*
20140   * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
20141 @@ -3039,6 +3048,128 @@ static long calc_load_fold_idle(void)
20142  
20143  	return delta;
20144  }
20145 +
20146 +/**
20147 + * fixed_power_int - compute: x^n, in O(log n) time
20148 + *
20149 + * @x:         base of the power
20150 + * @frac_bits: fractional bits of @x
20151 + * @n:         power to raise @x to.
20152 + *
20153 + * By exploiting the relation between the definition of the natural power
20154 + * function: x^n := x*x*...*x (x multiplied by itself for n times), and
20155 + * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
20156 + * (where: n_i \elem {0, 1}, the binary vector representing n),
20157 + * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
20158 + * of course trivially computable in O(log_2 n), the length of our binary
20159 + * vector.
20160 + */
20161 +static unsigned long
20162 +fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
20163 +{
20164 +	unsigned long result = 1UL << frac_bits;
20165 +
20166 +	if (n) for (;;) {
20167 +		if (n & 1) {
20168 +			result *= x;
20169 +			result += 1UL << (frac_bits - 1);
20170 +			result >>= frac_bits;
20171 +		}
20172 +		n >>= 1;
20173 +		if (!n)
20174 +			break;
20175 +		x *= x;
20176 +		x += 1UL << (frac_bits - 1);
20177 +		x >>= frac_bits;
20178 +	}
20179 +
20180 +	return result;
20181 +}
20182 +
20183 +/*
20184 + * a1 = a0 * e + a * (1 - e)
20185 + *
20186 + * a2 = a1 * e + a * (1 - e)
20187 + *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
20188 + *    = a0 * e^2 + a * (1 - e) * (1 + e)
20189 + *
20190 + * a3 = a2 * e + a * (1 - e)
20191 + *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
20192 + *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
20193 + *
20194 + *  ...
20195 + *
20196 + * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
20197 + *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
20198 + *    = a0 * e^n + a * (1 - e^n)
20199 + *
20200 + * [1] application of the geometric series:
20201 + *
20202 + *              n         1 - x^(n+1)
20203 + *     S_n := \Sum x^i = -------------
20204 + *             i=0          1 - x
20205 + */
20206 +static unsigned long
20207 +calc_load_n(unsigned long load, unsigned long exp,
20208 +	    unsigned long active, unsigned int n)
20209 +{
20210 +
20211 +	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
20212 +}
20213 +
20214 +/*
20215 + * NO_HZ can leave us missing all per-cpu ticks calling
20216 + * calc_load_account_active(), but since an idle CPU folds its delta into
20217 + * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
20218 + * in the pending idle delta if our idle period crossed a load cycle boundary.
20219 + *
20220 + * Once we've updated the global active value, we need to apply the exponential
20221 + * weights adjusted to the number of cycles missed.
20222 + */
20223 +static void calc_global_nohz(unsigned long ticks)
20224 +{
20225 +	long delta, active, n;
20226 +
20227 +	if (time_before(jiffies, calc_load_update))
20228 +		return;
20229 +
20230 +	/*
20231 +	 * If we crossed a calc_load_update boundary, make sure to fold
20232 +	 * any pending idle changes, the respective CPUs might have
20233 +	 * missed the tick driven calc_load_account_active() update
20234 +	 * due to NO_HZ.
20235 +	 */
20236 +	delta = calc_load_fold_idle();
20237 +	if (delta)
20238 +		atomic_long_add(delta, &calc_load_tasks);
20239 +
20240 +	/*
20241 +	 * If we were idle for multiple load cycles, apply them.
20242 +	 */
20243 +	if (ticks >= LOAD_FREQ) {
20244 +		n = ticks / LOAD_FREQ;
20245 +
20246 +		active = atomic_long_read(&calc_load_tasks);
20247 +		active = active > 0 ? active * FIXED_1 : 0;
20248 +
20249 +		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
20250 +		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
20251 +		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
20252 +
20253 +		calc_load_update += n * LOAD_FREQ;
20254 +	}
20255 +
20256 +	/*
20257 +	 * It's possible the remainder of the above division also crosses
20258 +	 * a LOAD_FREQ period, the regular check in calc_global_load()
20259 +	 * which comes after this will take care of that.
20260 +	 *
20261 +	 * Consider us being 11 ticks before a cycle completion, and us
20262 +	 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
20263 +	 * age us 4 cycles, and the test in calc_global_load() will
20264 +	 * pick up the final one.
20265 +	 */
20266 +}
20267  #else
20268  static void calc_load_account_idle(struct rq *this_rq)
20269  {
20270 @@ -3048,6 +3179,10 @@ static inline long calc_load_fold_idle(void)
20271  {
20272  	return 0;
20273  }
20274 +
20275 +static void calc_global_nohz(unsigned long ticks)
20276 +{
20277 +}
20278  #endif
20279  
20280  /**
20281 @@ -3065,24 +3200,17 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
20282  	loads[2] = (avenrun[2] + offset) << shift;
20283  }
20284  
20285 -static unsigned long
20286 -calc_load(unsigned long load, unsigned long exp, unsigned long active)
20287 -{
20288 -	load *= exp;
20289 -	load += active * (FIXED_1 - exp);
20290 -	return load >> FSHIFT;
20291 -}
20292 -
20293  /*
20294   * calc_load - update the avenrun load estimates 10 ticks after the
20295   * CPUs have updated calc_load_tasks.
20296   */
20297 -void calc_global_load(void)
20298 +void calc_global_load(unsigned long ticks)
20299  {
20300 -	unsigned long upd = calc_load_update + 10;
20301  	long active;
20302  
20303 -	if (time_before(jiffies, upd))
20304 +	calc_global_nohz(ticks);
20305 +
20306 +	if (time_before(jiffies, calc_load_update + 10))
20307  		return;
20308  
20309  	active = atomic_long_read(&calc_load_tasks);
20310 @@ -3744,7 +3872,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
20311  {
20312  	if (prev->se.on_rq)
20313  		update_rq_clock(rq);
20314 -	rq->skip_clock_update = 0;
20315  	prev->sched_class->put_prev_task(rq, prev);
20316  }
20317  
20318 @@ -3787,7 +3914,6 @@ pick_next_task(struct rq *rq)
20319  		class = class->next;
20320  	}
20321  }
20322 -
20323  /*
20324   * schedule() is the main scheduler function.
20325   */
20326 @@ -3809,6 +3935,9 @@ need_resched:
20327  	release_kernel_lock(prev);
20328  need_resched_nonpreemptible:
20329  	TS_SCHED_START;
20330 +	TS_LVLA_SCHED_START;
20331 +	TS_LVLB_SCHED_START;
20332 +	TS_LVLC_SCHED_START;
20333  	sched_trace_task_switch_away(prev);
20334  
20335  	schedule_debug(prev);
20336 @@ -3817,7 +3946,6 @@ need_resched_nonpreemptible:
20337  		hrtick_clear(rq);
20338  
20339  	raw_spin_lock_irq(&rq->lock);
20340 -	clear_tsk_need_resched(prev);
20341  
20342  	switch_count = &prev->nivcsw;
20343  	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
20344 @@ -3849,6 +3977,8 @@ need_resched_nonpreemptible:
20345  
20346  	put_prev_task(rq, prev);
20347  	next = pick_next_task(rq);
20348 +	clear_tsk_need_resched(prev);
20349 +	rq->skip_clock_update = 0;
20350  
20351  	if (likely(prev != next)) {
20352  		sched_info_switch(prev, next);
20353 @@ -3858,6 +3988,9 @@ need_resched_nonpreemptible:
20354  		rq->curr = next;
20355  		++*switch_count;
20356  
20357 +		TS_LVLA_SCHED_END(next);
20358 +		TS_LVLB_SCHED_END(next);
20359 +		TS_LVLC_SCHED_END(next);
20360  		TS_SCHED_END(next);
20361  		TS_CXS_START(next);
20362  		context_switch(rq, prev, next); /* unlocks the rq */
20363 @@ -3877,6 +4010,16 @@ need_resched_nonpreemptible:
20364  
20365  	sched_trace_task_switch_to(current);
20366  
20367 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME    
20368 +    /* send signal here */
20369 +    if (prev->policy == SCHED_LITMUS) {
20370 +        if (atomic_read(&pend_release_signal) == 1) {
20371 +            send_release_signal_to_monitor();
20372 +            atomic_set(&pend_release_signal, 0);
20373 +        }
20374 +        send_signal_to_monitor();
20375 +    }
20376 +#endif    
20377  	post_schedule(rq);
20378  
20379  	if (sched_state_validate_switch() || unlikely(reacquire_kernel_lock(prev)))
20380 @@ -3886,8 +4029,7 @@ need_resched_nonpreemptible:
20381  	if (need_resched())
20382  		goto need_resched;
20383  
20384 -	if (srp_active())
20385 -		srp_ceiling_block();
20386 +	srp_ceiling_block();
20387  }
20388  EXPORT_SYMBOL(schedule);
20389  
20390 @@ -5433,7 +5575,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
20391  	idle->se.exec_start = sched_clock();
20392  
20393  	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
20394 +	/*
20395 +	 * We're having a chicken and egg problem, even though we are
20396 +	 * holding rq->lock, the cpu isn't yet set to this cpu so the
20397 +	 * lockdep check in task_group() will fail.
20398 +	 *
20399 +	 * Similar case to sched_fork(). / Alternatively we could
20400 +	 * use task_rq_lock() here and obtain the other rq->lock.
20401 +	 *
20402 +	 * Silence PROVE_RCU
20403 +	 */
20404 +	rcu_read_lock();
20405  	__set_task_cpu(idle, cpu);
20406 +	rcu_read_unlock();
20407  
20408  	rq->curr = rq->idle = idle;
20409  #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
20410 diff --git a/kernel/smp.c b/kernel/smp.c
20411 index ed6aacf..1ba1ba4 100644
20412 --- a/kernel/smp.c
20413 +++ b/kernel/smp.c
20414 @@ -194,6 +194,24 @@ void generic_smp_call_function_interrupt(void)
20415  	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
20416  		int refs;
20417  
20418 +		/*
20419 +		 * Since we walk the list without any locks, we might
20420 +		 * see an entry that was completed, removed from the
20421 +		 * list and is in the process of being reused.
20422 +		 *
20423 +		 * We must check that the cpu is in the cpumask before
20424 +		 * checking the refs, and both must be set before
20425 +		 * executing the callback on this cpu.
20426 +		 */
20427 +
20428 +		if (!cpumask_test_cpu(cpu, data->cpumask))
20429 +			continue;
20430 +
20431 +		smp_rmb();
20432 +
20433 +		if (atomic_read(&data->refs) == 0)
20434 +			continue;
20435 +
20436  		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
20437  			continue;
20438  
20439 @@ -202,6 +220,8 @@ void generic_smp_call_function_interrupt(void)
20440  		refs = atomic_dec_return(&data->refs);
20441  		WARN_ON(refs < 0);
20442  		if (!refs) {
20443 +			WARN_ON(!cpumask_empty(data->cpumask));
20444 +
20445  			raw_spin_lock(&call_function.lock);
20446  			list_del_rcu(&data->csd.list);
20447  			raw_spin_unlock(&call_function.lock);
20448 @@ -453,11 +473,21 @@ void smp_call_function_many(const struct cpumask *mask,
20449  
20450  	data = &__get_cpu_var(cfd_data);
20451  	csd_lock(&data->csd);
20452 +	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
20453  
20454  	data->csd.func = func;
20455  	data->csd.info = info;
20456  	cpumask_and(data->cpumask, mask, cpu_online_mask);
20457  	cpumask_clear_cpu(this_cpu, data->cpumask);
20458 +
20459 +	/*
20460 +	 * To ensure the interrupt handler gets a complete view
20461 +	 * we order the cpumask and refs writes and order the read
20462 +	 * of them in the interrupt handler.  In addition we may
20463 +	 * only clear our own cpu bit from the mask.
20464 +	 */
20465 +	smp_wmb();
20466 +
20467  	atomic_set(&data->refs, cpumask_weight(data->cpumask));
20468  
20469  	raw_spin_lock_irqsave(&call_function.lock, flags);
20470 diff --git a/kernel/sys.c b/kernel/sys.c
20471 index 7f5a0cd..66136ca 100644
20472 --- a/kernel/sys.c
20473 +++ b/kernel/sys.c
20474 @@ -1377,7 +1377,8 @@ static int check_prlimit_permission(struct task_struct *task)
20475  	const struct cred *cred = current_cred(), *tcred;
20476  
20477  	tcred = __task_cred(task);
20478 -	if ((cred->uid != tcred->euid ||
20479 +	if (current != task &&
20480 +	    (cred->uid != tcred->euid ||
20481  	     cred->uid != tcred->suid ||
20482  	     cred->uid != tcred->uid  ||
20483  	     cred->gid != tcred->egid ||
20484 diff --git a/kernel/timer.c b/kernel/timer.c
20485 index 97bf05b..102ad37 100644
20486 --- a/kernel/timer.c
20487 +++ b/kernel/timer.c
20488 @@ -1252,6 +1252,12 @@ unsigned long get_next_timer_interrupt(unsigned long now)
20489  	struct tvec_base *base = __get_cpu_var(tvec_bases);
20490  	unsigned long expires;
20491  
20492 +	/*
20493 +	 * Pretend that there is no timer pending if the cpu is offline.
20494 +	 * Possible pending timers will be migrated later to an active cpu.
20495 +	 */
20496 +	if (cpu_is_offline(smp_processor_id()))
20497 +		return now + NEXT_TIMER_MAX_DELTA;
20498  	spin_lock(&base->lock);
20499  	if (time_before_eq(base->next_timer, base->timer_jiffies))
20500  		base->next_timer = __next_timer_interrupt(base);
20501 @@ -1316,7 +1322,7 @@ void do_timer(unsigned long ticks)
20502  {
20503  	jiffies_64 += ticks;
20504  	update_wall_time();
20505 -	calc_global_load();
20506 +	calc_global_load(ticks);
20507  }
20508  
20509  #ifdef __ARCH_WANT_SYS_ALARM
20510 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
20511 index 9ec59f5..7702f5a 100644
20512 --- a/kernel/trace/trace.c
20513 +++ b/kernel/trace/trace.c
20514 @@ -2320,11 +2320,19 @@ tracing_write_stub(struct file *filp, const char __user *ubuf,
20515  	return count;
20516  }
20517  
20518 +static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
20519 +{
20520 +	if (file->f_mode & FMODE_READ)
20521 +		return seq_lseek(file, offset, origin);
20522 +	else
20523 +		return 0;
20524 +}
20525 +
20526  static const struct file_operations tracing_fops = {
20527  	.open		= tracing_open,
20528  	.read		= seq_read,
20529  	.write		= tracing_write_stub,
20530 -	.llseek		= seq_lseek,
20531 +	.llseek		= tracing_seek,
20532  	.release	= tracing_release,
20533  };
20534  
20535 diff --git a/kernel/user.c b/kernel/user.c
20536 index 7e72614..8ce395f 100644
20537 --- a/kernel/user.c
20538 +++ b/kernel/user.c
20539 @@ -157,6 +157,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
20540  		spin_lock_irq(&uidhash_lock);
20541  		up = uid_hash_find(uid, hashent);
20542  		if (up) {
20543 +			put_user_ns(ns);
20544  			key_put(new->uid_keyring);
20545  			key_put(new->session_keyring);
20546  			kmem_cache_free(uid_cachep, new);
20547 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
20548 index 7f9c3c5..e359b2e 100644
20549 --- a/kernel/watchdog.c
20550 +++ b/kernel/watchdog.c
20551 @@ -377,7 +377,8 @@ static int watchdog_nmi_enable(int cpu)
20552  		goto out_save;
20553  	}
20554  
20555 -	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
20556 +	printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n",
20557 +	       cpu, PTR_ERR(event));
20558  	return -1;
20559  
20560  	/* success path */
20561 @@ -440,9 +441,6 @@ static int watchdog_enable(int cpu)
20562  		wake_up_process(p);
20563  	}
20564  
20565 -	/* if any cpu succeeds, watchdog is considered enabled for the system */
20566 -	watchdog_enabled = 1;
20567 -
20568  	return 0;
20569  }
20570  
20571 @@ -470,12 +468,16 @@ static void watchdog_disable(int cpu)
20572  static void watchdog_enable_all_cpus(void)
20573  {
20574  	int cpu;
20575 -	int result = 0;
20576 +
20577 +	watchdog_enabled = 0;
20578  
20579  	for_each_online_cpu(cpu)
20580 -		result += watchdog_enable(cpu);
20581 +		if (!watchdog_enable(cpu))
20582 +			/* if any cpu succeeds, watchdog is considered
20583 +			   enabled for the system */
20584 +			watchdog_enabled = 1;
20585  
20586 -	if (result)
20587 +	if (!watchdog_enabled)
20588  		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
20589  
20590  }
20591 @@ -503,10 +505,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
20592  {
20593  	proc_dointvec(table, write, buffer, length, ppos);
20594  
20595 -	if (watchdog_enabled)
20596 -		watchdog_enable_all_cpus();
20597 -	else
20598 -		watchdog_disable_all_cpus();
20599 +	if (write) {
20600 +		if (watchdog_enabled)
20601 +			watchdog_enable_all_cpus();
20602 +		else
20603 +			watchdog_disable_all_cpus();
20604 +	}
20605  	return 0;
20606  }
20607  
20608 diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
20609 index ec9048e..209448e 100644
20610 --- a/lib/percpu_counter.c
20611 +++ b/lib/percpu_counter.c
20612 @@ -76,6 +76,7 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
20613  	if (!fbc->counters)
20614  		return -ENOMEM;
20615  #ifdef CONFIG_HOTPLUG_CPU
20616 +	INIT_LIST_HEAD(&fbc->list);
20617  	mutex_lock(&percpu_counters_lock);
20618  	list_add(&fbc->list, &percpu_counters);
20619  	mutex_unlock(&percpu_counters_lock);
20620 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
20621 index efd16fa..296eb81 100644
20622 --- a/lib/radix-tree.c
20623 +++ b/lib/radix-tree.c
20624 @@ -82,6 +82,16 @@ struct radix_tree_preload {
20625  };
20626  static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
20627  
20628 +static inline void *ptr_to_indirect(void *ptr)
20629 +{
20630 +	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
20631 +}
20632 +
20633 +static inline void *indirect_to_ptr(void *ptr)
20634 +{
20635 +	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
20636 +}
20637 +
20638  static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
20639  {
20640  	return root->gfp_mask & __GFP_BITS_MASK;
20641 @@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
20642  			return -ENOMEM;
20643  
20644  		/* Increase the height.  */
20645 -		node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
20646 +		node->slots[0] = indirect_to_ptr(root->rnode);
20647  
20648  		/* Propagate the aggregated tag info into the new root */
20649  		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
20650 @@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
20651  		newheight = root->height+1;
20652  		node->height = newheight;
20653  		node->count = 1;
20654 -		node = radix_tree_ptr_to_indirect(node);
20655 +		node = ptr_to_indirect(node);
20656  		rcu_assign_pointer(root->rnode, node);
20657  		root->height = newheight;
20658  	} while (height > root->height);
20659 @@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
20660  			return error;
20661  	}
20662  
20663 -	slot = radix_tree_indirect_to_ptr(root->rnode);
20664 +	slot = indirect_to_ptr(root->rnode);
20665  
20666  	height = root->height;
20667  	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
20668 @@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
20669  				rcu_assign_pointer(node->slots[offset], slot);
20670  				node->count++;
20671  			} else
20672 -				rcu_assign_pointer(root->rnode,
20673 -					radix_tree_ptr_to_indirect(slot));
20674 +				rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
20675  		}
20676  
20677  		/* Go a level down */
20678 @@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
20679  			return NULL;
20680  		return is_slot ? (void *)&root->rnode : node;
20681  	}
20682 -	node = radix_tree_indirect_to_ptr(node);
20683 +	node = indirect_to_ptr(node);
20684  
20685  	height = node->height;
20686  	if (index > radix_tree_maxindex(height))
20687 @@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
20688  		height--;
20689  	} while (height > 0);
20690  
20691 -	return is_slot ? (void *)slot:node;
20692 +	return is_slot ? (void *)slot : indirect_to_ptr(node);
20693  }
20694  
20695  /**
20696 @@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
20697  	height = root->height;
20698  	BUG_ON(index > radix_tree_maxindex(height));
20699  
20700 -	slot = radix_tree_indirect_to_ptr(root->rnode);
20701 +	slot = indirect_to_ptr(root->rnode);
20702  	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
20703  
20704  	while (height > 0) {
20705 @@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
20706  
20707  	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
20708  	pathp->node = NULL;
20709 -	slot = radix_tree_indirect_to_ptr(root->rnode);
20710 +	slot = indirect_to_ptr(root->rnode);
20711  
20712  	while (height > 0) {
20713  		int offset;
20714 @@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
20715  
20716  	if (!radix_tree_is_indirect_ptr(node))
20717  		return (index == 0);
20718 -	node = radix_tree_indirect_to_ptr(node);
20719 +	node = indirect_to_ptr(node);
20720  
20721  	height = node->height;
20722  	if (index > radix_tree_maxindex(height))
20723 @@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
20724  	}
20725  
20726  	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
20727 -	slot = radix_tree_indirect_to_ptr(root->rnode);
20728 +	slot = indirect_to_ptr(root->rnode);
20729  
20730  	/*
20731  	 * we fill the path from (root->height - 2) to 0, leaving the index at
20732 @@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
20733  		results[0] = node;
20734  		return 1;
20735  	}
20736 -	node = radix_tree_indirect_to_ptr(node);
20737 +	node = indirect_to_ptr(node);
20738  
20739  	max_index = radix_tree_maxindex(node->height);
20740  
20741 @@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
20742  			slot = *(((void ***)results)[ret + i]);
20743  			if (!slot)
20744  				continue;
20745 -			results[ret + nr_found] = rcu_dereference_raw(slot);
20746 +			results[ret + nr_found] =
20747 +				indirect_to_ptr(rcu_dereference_raw(slot));
20748  			nr_found++;
20749  		}
20750  		ret += nr_found;
20751 @@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
20752  		results[0] = (void **)&root->rnode;
20753  		return 1;
20754  	}
20755 -	node = radix_tree_indirect_to_ptr(node);
20756 +	node = indirect_to_ptr(node);
20757  
20758  	max_index = radix_tree_maxindex(node->height);
20759  
20760 @@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
20761  		results[0] = node;
20762  		return 1;
20763  	}
20764 -	node = radix_tree_indirect_to_ptr(node);
20765 +	node = indirect_to_ptr(node);
20766  
20767  	max_index = radix_tree_maxindex(node->height);
20768  
20769 @@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
20770  			slot = *(((void ***)results)[ret + i]);
20771  			if (!slot)
20772  				continue;
20773 -			results[ret + nr_found] = rcu_dereference_raw(slot);
20774 +			results[ret + nr_found] =
20775 +				indirect_to_ptr(rcu_dereference_raw(slot));
20776  			nr_found++;
20777  		}
20778  		ret += nr_found;
20779 @@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
20780  		results[0] = (void **)&root->rnode;
20781  		return 1;
20782  	}
20783 -	node = radix_tree_indirect_to_ptr(node);
20784 +	node = indirect_to_ptr(node);
20785  
20786  	max_index = radix_tree_maxindex(node->height);
20787  
20788 @@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
20789  		void *newptr;
20790  
20791  		BUG_ON(!radix_tree_is_indirect_ptr(to_free));
20792 -		to_free = radix_tree_indirect_to_ptr(to_free);
20793 +		to_free = indirect_to_ptr(to_free);
20794  
20795  		/*
20796  		 * The candidate node has more than one child, or its child
20797 @@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
20798  
20799  		/*
20800  		 * We don't need rcu_assign_pointer(), since we are simply
20801 -		 * moving the node from one part of the tree to another. If
20802 -		 * it was safe to dereference the old pointer to it
20803 +		 * moving the node from one part of the tree to another: if it
20804 +		 * was safe to dereference the old pointer to it
20805  		 * (to_free->slots[0]), it will be safe to dereference the new
20806 -		 * one (root->rnode).
20807 +		 * one (root->rnode) as far as dependent read barriers go.
20808  		 */
20809  		newptr = to_free->slots[0];
20810  		if (root->height > 1)
20811 -			newptr = radix_tree_ptr_to_indirect(newptr);
20812 +			newptr = ptr_to_indirect(newptr);
20813  		root->rnode = newptr;
20814  		root->height--;
20815 +
20816 +		/*
20817 +		 * We have a dilemma here. The node's slot[0] must not be
20818 +		 * NULLed in case there are concurrent lookups expecting to
20819 +		 * find the item. However if this was a bottom-level node,
20820 +		 * then it may be subject to the slot pointer being visible
20821 +		 * to callers dereferencing it. If item corresponding to
20822 +		 * slot[0] is subsequently deleted, these callers would expect
20823 +		 * their slot to become empty sooner or later.
20824 +		 *
20825 +		 * For example, lockless pagecache will look up a slot, deref
20826 +		 * the page pointer, and if the page is 0 refcount it means it
20827 +		 * was concurrently deleted from pagecache so try the deref
20828 +		 * again. Fortunately there is already a requirement for logic
20829 +		 * to retry the entire slot lookup -- the indirect pointer
20830 +		 * problem (replacing direct root node with an indirect pointer
20831 +		 * also results in a stale slot). So tag the slot as indirect
20832 +		 * to force callers to retry.
20833 +		 */
20834 +		if (root->height == 0)
20835 +			*((unsigned long *)&to_free->slots[0]) |=
20836 +						RADIX_TREE_INDIRECT_PTR;
20837 +
20838  		radix_tree_node_free(to_free);
20839  	}
20840  }
20841 @@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
20842  		root->rnode = NULL;
20843  		goto out;
20844  	}
20845 -	slot = radix_tree_indirect_to_ptr(slot);
20846 +	slot = indirect_to_ptr(slot);
20847  
20848  	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
20849  	pathp->node = NULL;
20850 @@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
20851  			radix_tree_node_free(to_free);
20852  
20853  		if (pathp->node->count) {
20854 -			if (pathp->node ==
20855 -					radix_tree_indirect_to_ptr(root->rnode))
20856 +			if (pathp->node == indirect_to_ptr(root->rnode))
20857  				radix_tree_shrink(root);
20858  			goto out;
20859  		}
20860 diff --git a/litmus/Kconfig b/litmus/Kconfig
20861 index a2f2678..6d52b5d 100644
20862 --- a/litmus/Kconfig
20863 +++ b/litmus/Kconfig
20864 @@ -23,16 +23,74 @@ config PLUGIN_PFAIR
20865  
20866  	  If unsure, say Yes.
20867  
20868 +config MERGE_TIMERS
20869 +       bool "Timer-merging Support"
20870 +       depends on HIGH_RES_TIMERS
20871 +       default y
20872 +       help
20873 +         Include support for merging timers.
20874 +
20875 +config MERGE_TIMERS_WINDOW
20876 +       int "Timer-merging Window (in nanoseconds)"
20877 +       depends on MERGE_TIMERS
20878 +       default 1000
20879 +       help
20880 +         Window within which separate timers may be merged.
20881 +
20882  config RELEASE_MASTER
20883          bool "Release-master Support"
20884  	depends on ARCH_HAS_SEND_PULL_TIMERS
20885  	default n
20886  	help
20887 -           Allow one processor to act as a dedicated interrupt processor
20888 -           that services all timer interrupts, but that does not schedule
20889 -           real-time tasks. See RTSS'09 paper for details
20890 -	   (http://www.cs.unc.edu/~anderson/papers.html).
20891 -           Currently only supported by GSN-EDF.
20892 +          In GSN-EDF, allow one processor to act as a dedicated interrupt
20893 +	  processor that services all timer interrupts, but that does not schedule
20894 +          real-time tasks. See RTSS'09 paper for details
20895 +	  (http://www.cs.unc.edu/~anderson/papers.html).
20896 +
20897 +menu "Mixed Criticality"
20898 +
20899 +config PLUGIN_MC
20900 +       bool "Mixed Criticality Scheduler"
20901 +       depends on X86 && SYSFS
20902 +       default y
20903 +       help
20904 +         Include the mixed criticality scheduler. This plugin depends
20905 +         on the global release-master processor for its _REDIRECT and
20906 +         _RELEASE_MASTER options.
20907 +
20908 +         If unsure, say Yes.
20909 +
20910 +config PLUGIN_MC_LEVEL_A_MAX_TASKS
20911 +       int "Maximum level A tasks"
20912 +       depends on PLUGIN_MC
20913 +       range 1 128
20914 +       default 32
20915 +       help
20916 +         The maximum number of level A tasks allowed per CPU at level A.
20917 +
20918 +config PLUGIN_MC_RELEASE_MASTER
20919 +       bool "Release-master support for MC"
20920 +       depends on PLUGIN_MC && RELEASE_MASTER
20921 +       default y
20922 +       help
20923 +         Send all timer interrupts to the system-wide release-master CPU.
20924 +
20925 +config PLUGIN_MC_REDIRECT
20926 +       bool "Redirect Work to Release-master"
20927 +       depends on PLUGIN_MC && RELEASE_MASTER
20928 +       default y
20929 +       help
20930 +         Allow processors to send work involving global state to the
20931 +         release-master cpu in order to avoid excess overheads during
20932 +         partitioned decisions.
20933 +
20934 +config PLUGIN_VIRTUAL_TIME
20935 +       bool "Use virtual timer to recover from overloaded situation"
20936 +       depends on PLUGIN_MC
20937 +       default y
20938 +       help
20939 +         Virtual timer implementation.
20940 +endmenu
20941  
20942  endmenu
20943  
20944 @@ -46,28 +104,19 @@ config NP_SECTION
20945            Note that plugins still need to explicitly support non-preemptivity.
20946            Currently, only GSN-EDF and PSN-EDF have such support.
20947  
20948 -	  This is required to support the FMLP.
20949 +	  This is required to support locking protocols such as the FMLP.
20950  	  If disabled, all tasks will be considered preemptable at all times.
20951  
20952 -config SRP
20953 -	bool "Stack Resource Policy (SRP)"
20954 -	default n
20955 -	help
20956 -	  Include support for Baker's Stack Resource Policy.
20957 -
20958 -	  Say Yes if you want FMLP local long critical section
20959 -	  synchronization support.
20960 -
20961 -config FMLP
20962 -	bool "FMLP support"
20963 +config LITMUS_LOCKING
20964 +        bool "Support for real-time locking protocols"
20965  	depends on NP_SECTION
20966  	default n
20967  	help
20968 -	  Include support for deterministic multiprocessor real-time
20969 -	  synchronization support.
20970 +	  Enable LITMUS^RT's deterministic multiprocessor real-time
20971 +	  locking protocols.
20972  
20973 -	  Say Yes if you want FMLP long critical section
20974 -	  synchronization support.
20975 +	  Say Yes if you want to include locking protocols such as the FMLP and
20976 +	  Baker's SRP.
20977  
20978  endmenu
20979  
20980 @@ -111,7 +160,7 @@ config SCHED_TASK_TRACE
20981  config SCHED_TASK_TRACE_SHIFT
20982         int "Buffer size for sched_trace_xxx() events"
20983         depends on SCHED_TASK_TRACE
20984 -       range 8 13
20985 +       range 8 20
20986         default 9
20987         help
20988  
20989 @@ -152,7 +201,7 @@ config SCHED_DEBUG_TRACE
20990  config SCHED_DEBUG_TRACE_SHIFT
20991         int "Buffer size for TRACE() buffer"
20992         depends on SCHED_DEBUG_TRACE
20993 -       range 14 22
20994 +       range 14 24
20995         default 18
20996         help
20997  
20998 diff --git a/litmus/Makefile b/litmus/Makefile
20999 index b7366b5..b6ca7ab 100644
21000 --- a/litmus/Makefile
21001 +++ b/litmus/Makefile
21002 @@ -6,20 +6,22 @@ obj-y     = sched_plugin.o litmus.o \
21003  	    preempt.o \
21004  	    litmus_proc.o \
21005  	    budget.o \
21006 +	    clustered.o \
21007  	    jobs.o \
21008  	    sync.o \
21009  	    rt_domain.o \
21010  	    edf_common.o \
21011  	    fdso.o \
21012 +	    locking.o \
21013  	    srp.o \
21014 -	    fmlp.o \
21015  	    bheap.o \
21016  	    ctrldev.o \
21017 -	    sched_gsn_edf.o \
21018 -	    sched_psn_edf.o
21019 +	    domain.o
21020  
21021  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
21022  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
21023 +obj-$(CONFIG_PLUGIN_MC) += sched_mc.o sched_mc_ce.o ce_domain.o
21024 +obj-$(CONFIG_MERGE_TIMERS) += event_group.o
21025  
21026  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
21027  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
21028 diff --git a/litmus/bheap.c b/litmus/bheap.c
21029 index 528af97..42122d8 100644
21030 --- a/litmus/bheap.c
21031 +++ b/litmus/bheap.c
21032 @@ -248,13 +248,14 @@ int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node)
21033  void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap,
21034  		 struct bheap_node* node)
21035  {
21036 -	struct bheap_node *parent, *prev, *pos;
21037 +	struct bheap_node *parent, *prev, *pos, *old;
21038  	struct bheap_node** tmp_ref;
21039  	void* tmp;
21040  
21041  	if (heap->min != node) {
21042  		/* bubble up */
21043  		parent = node->parent;
21044 +		old = node;
21045  		while (parent) {
21046  			/* swap parent and node */
21047  			tmp           = parent->value;
21048 diff --git a/litmus/budget.c b/litmus/budget.c
21049 index 310e9a3..f39a51d 100644
21050 --- a/litmus/budget.c
21051 +++ b/litmus/budget.c
21052 @@ -4,6 +4,12 @@
21053  
21054  #include <litmus/litmus.h>
21055  #include <litmus/preempt.h>
21056 +#include <litmus/budget.h>
21057 +#include <litmus/sched_trace.h>
21058 +	
21059 +struct mc_task;
21060 +struct mc_job;
21061 +struct mc_data;
21062  
21063  struct enforcement_timer {
21064  	/* The enforcement timer is used to accurately police
21065 @@ -108,4 +114,46 @@ static int __init init_budget_enforcement(void)
21066  	return 0;
21067  }
21068  
21069 +void task_release(struct task_struct *t)
21070 +{
21071 +    t->rt_param.job_params.real_release = t->rt_param.job_params.real_deadline;
21072 +	t->rt_param.job_params.real_deadline += get_rt_period(t);
21073 +	t->rt_param.job_params.job_no++;
21074 +    sched_trace_task_release(t);
21075 +}
21076 +
21077 +void server_release(struct task_struct *t)
21078 +{
21079 +	t->rt_param.job_params.exec_time = 0;
21080 +	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
21081 +	t->rt_param.job_params.deadline +=  get_rt_period(t);
21082 +	
21083 +	/* don't confuse linux */
21084 +	t->rt.time_slice = 1;
21085 +}
21086 +
21087 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
21088 +struct _rt_domain;
21089 +extern pid_t signal_pid;
21090 +extern struct _rt_domain _mc_crit_c_rt;
21091 +
21092 +void server_release_crit_c(struct task_struct *t)
21093 +{
21094 +    lt_t virt_last_release;
21095 +    if (t->rt_param.domain)
21096 +        t->rt_param.domain = &_mc_crit_c_rt;
21097 +    if (t->rt_param.job_params.act_last_release == 0)
21098 +        t->rt_param.job_params.act_last_release = t->rt_param.job_params.release - get_rt_period(t);
21099 +    
21100 +    virt_last_release = act_to_virt(&_mc_crit_c_rt, t->rt_param.job_params.act_last_release);
21101 +    
21102 +	t->rt_param.job_params.placeholder = 1;
21103 +	t->rt_param.job_params.act_priority_point = 0;
21104 +	t->rt_param.job_params.release = virt_to_act(&_mc_crit_c_rt, virt_last_release + get_rt_period(t));
21105 +	t->rt_param.job_params.deadline = virt_to_act(&_mc_crit_c_rt, virt_last_release + 2*get_rt_period(t));
21106 +	t->rt_param.job_params.virt_priority_point = act_to_virt(&_mc_crit_c_rt, t->rt_param.job_params.release) + t->rt_param.task_params.relative_pp;
21107 +    t->rt_param.job_params.act_last_release = t->rt_param.job_params.release;
21108 +}
21109 +#endif
21110 +
21111  module_init(init_budget_enforcement);
21112 diff --git a/litmus/ce_domain.c b/litmus/ce_domain.c
21113 new file mode 100644
21114 index 0000000..b2c5d4e
21115 --- /dev/null
21116 +++ b/litmus/ce_domain.c
21117 @@ -0,0 +1,102 @@
21118 +#include <linux/pid.h>
21119 +#include <linux/sched.h>
21120 +#include <linux/hrtimer.h>
21121 +#include <linux/slab.h>
21122 +
21123 +#include <litmus/litmus.h>
21124 +#include <litmus/debug_trace.h>
21125 +#include <litmus/rt_param.h>
21126 +#include <litmus/domain.h>
21127 +#include <litmus/event_group.h>
21128 +#include <litmus/sched_mc.h>
21129 +#include <litmus/ce_domain.h>
21130 +
21131 +/*
21132 + * Called for:
21133 + * task_new
21134 + * job_completion
21135 + * wake_up
21136 + */
21137 +void ce_requeue(domain_t *dom, struct task_struct *ts)
21138 +{
21139 +	const struct ce_dom_data *ce_data = dom->data;
21140 +	const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id;
21141 +	const unsigned int just_finished = tsk_rt(ts)->job_params.job_no;
21142 +	const unsigned int expected_job =
21143 +		mc_ce_get_expected_job(ce_data->cpu, idx);
21144 +	const int asleep = RT_F_SLEEP == get_rt_flags(ts);
21145 +
21146 +	TRACE_MC_TASK(ts, "entered ce_requeue. asleep: %d  just_finished: %3u  "
21147 +			"expected_job: %3u\n",
21148 +			asleep, just_finished, expected_job);
21149 +
21150 +	tsk_mc_data(ts)->mc_task.lvl_a_eligible = 1;
21151 +
21152 +	/* When coming from job completion, the task will be asleep. */
21153 +	if (asleep && just_finished < expected_job) {
21154 +		TRACE_MC_TASK(ts, "appears behind\n");
21155 +	} else if (asleep && expected_job < just_finished) {
21156 +		TRACE_MC_TASK(ts, "job %u completed in expected job %u which "
21157 +				"seems too early\n", just_finished,
21158 +				expected_job);
21159 +	}
21160 +}
21161 +
21162 +/*
21163 + *
21164 + */
21165 +void ce_remove(domain_t *dom, struct task_struct *ts)
21166 +{
21167 +	tsk_mc_data(ts)->mc_task.lvl_a_eligible = 0;
21168 +}
21169 +
21170 +/*
21171 + * ce_take_ready and ce_peek_ready
21172 + */
21173 +struct task_struct* ce_peek_and_take_ready(domain_t *dom)
21174 +{
21175 +	const struct ce_dom_data *ce_data = dom->data;
21176 +	struct task_struct *ret = NULL, *sched = ce_data->should_schedule;
21177 +	const int exists = NULL != sched;
21178 +	const int blocked = exists && !is_running(sched);
21179 +	const int elig = exists && tsk_mc_data(sched) &&
21180 +		tsk_mc_data(sched)->mc_task.lvl_a_eligible;
21181 +
21182 +	/* Return the task we should schedule if it exists, is not blocked, and is eligible. */
21183 +	if (exists && !blocked && elig)
21184 +		ret = sched;
21185 +	return ret;
21186 +}
21187 +
21188 +int ce_higher_prio(struct task_struct *a, struct task_struct *b)
21189 +{
21190 +	const domain_t *dom = get_task_domain(a);
21191 +	const struct ce_dom_data *ce_data = dom->data;
21192 +	return (a != b && a == ce_data->should_schedule);
21193 +}
21194 +
21195 +void ce_domain_init(domain_t *dom,
21196 +		raw_spinlock_t *lock,
21197 +		requeue_t requeue,
21198 +		peek_ready_t peek_ready,
21199 +		take_ready_t take_ready,
21200 +		preempt_needed_t preempt_needed,
21201 +		task_prio_t task_prio,
21202 +		struct ce_dom_data *dom_data,
21203 +		const int cpu,
21204 +		ce_timer_callback_t ce_timer_callback)
21205 +{
21206 +	domain_init(dom, lock, requeue, peek_ready, take_ready, preempt_needed,
21207 +			task_prio);
21208 +	dom->data = dom_data;
21209 +	dom->remove = ce_remove;
21210 +	dom_data->cpu = cpu;
21211 +#ifdef CONFIG_MERGE_TIMERS
21212 +	init_event(&dom_data->event, CRIT_LEVEL_A, ce_timer_callback,
21213 +			event_list_alloc(GFP_ATOMIC));
21214 +#else
21215 +	hrtimer_start_on_info_init(&dom_data->timer_info);
21216 +	hrtimer_init(&dom_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
21217 +	dom_data->timer.function = ce_timer_callback;
21218 +#endif
21219 +}
21220 diff --git a/litmus/clustered.c b/litmus/clustered.c
21221 new file mode 100644
21222 index 0000000..04450a8
21223 --- /dev/null
21224 +++ b/litmus/clustered.c
21225 @@ -0,0 +1,111 @@
21226 +#include <linux/gfp.h>
21227 +#include <linux/cpumask.h>
21228 +#include <linux/list.h>
21229 +
21230 +#include <litmus/clustered.h>
21231 +
21232 +#ifndef CONFIG_X86
21233 +/* fake get_shared_cpu_map() on non-x86 architectures */
21234 +
21235 +int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
21236 +{
21237 +	if (index != 1)
21238 +		return 1;
21239 +	else {
21240 +		/* Fake L1: CPU is all by itself. */
21241 +		cpumask_clear(mask);
21242 +		cpumask_set_cpu(cpu, mask);
21243 +		return 0;
21244 +	}
21245 +}
21246 +
21247 +#endif
21248 +
21249 +int get_cluster_size(enum cache_level level)
21250 +{
21251 +	cpumask_var_t mask;
21252 +	int ok;
21253 +	int num_cpus;
21254 +
21255 +	if (level == GLOBAL_CLUSTER)
21256 +		return num_online_cpus();
21257 +	else {
21258 +		if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
21259 +			return -ENOMEM;
21260 +		/* assumes CPU 0 is representative of all CPUs */
21261 +		ok = get_shared_cpu_map(mask, 0, level);
21262 +		/* ok == 0 means we got the map; otherwise it's an invalid cache level */
21263 +		if (ok == 0)
21264 +			num_cpus = cpumask_weight(mask);
21265 +		free_cpumask_var(mask);
21266 +
21267 +		if (ok == 0)
21268 +			return num_cpus;
21269 +		else
21270 +			return -EINVAL;
21271 +	}
21272 +}
21273 +
21274 +int assign_cpus_to_clusters(enum cache_level level,
21275 +			    struct scheduling_cluster* clusters[],
21276 +			    unsigned int num_clusters,
21277 +			    struct cluster_cpu* cpus[],
21278 +			    unsigned int num_cpus)
21279 +{
21280 +	cpumask_var_t mask;
21281 +	unsigned int i, free_cluster = 0, low_cpu;
21282 +	int err = 0;
21283 +
21284 +	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
21285 +		return -ENOMEM;
21286 +
21287 +	/* clear cluster pointers */
21288 +	for (i = 0; i < num_cpus; i++) {
21289 +		cpus[i]->id      = i;
21290 +		cpus[i]->cluster = NULL;
21291 +	}
21292 +
21293 +	/* initialize clusters */
21294 +	for (i = 0; i < num_clusters; i++) {
21295 +		clusters[i]->id = i;
21296 +		INIT_LIST_HEAD(&clusters[i]->cpus);
21297 +	}
21298 +
21299 +	/* Assign each CPU. Two assumptions are made:
21300 +	 * 1) The index of a cpu in cpus corresponds to its processor id (i.e., the index in a cpu mask).
21301 +	 * 2) All cpus that belong to some cluster are online.
21302 +	 */
21303 +	for_each_online_cpu(i) {
21304 +		/* get lowest-id CPU in cluster */
21305 +		if (level != GLOBAL_CLUSTER) {
21306 +			err = get_shared_cpu_map(mask, cpus[i]->id, level);
21307 +			if (err != 0) {
21308 +				/* ugh... wrong cache level? Either caller screwed up
21309 +				 * or the CPU topology is weird. */
21310 +				printk(KERN_ERR "Could not set up clusters for L%d sharing (max: L%d).\n",
21311 +				       level, err);
21312 +				err = -EINVAL;
21313 +				goto out;
21314 +			}
21315 +			low_cpu = cpumask_first(mask);
21316 +		} else
21317 +			low_cpu = 0;
21318 +		if (low_cpu == i) {
21319 +			/* caller must provide an appropriate number of clusters */
21320 +			BUG_ON(free_cluster >= num_clusters);
21321 +
21322 +			/* create new cluster */
21323 +			cpus[i]->cluster = clusters[free_cluster++];
21324 +		} else {
21325 +			/* low_cpu points to the right cluster
21326 +			 * Assumption: low_cpu is actually online and was processed earlier. */
21327 +			cpus[i]->cluster = cpus[low_cpu]->cluster;
21328 +		}
21329 +		/* enqueue in cpus list */
21330 +		list_add(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus);
21331 +		printk(KERN_INFO "Assigning CPU%u to cluster %u.\n", i, cpus[i]->cluster->id);
21332 +	}
21333 +out:
21334 +	free_cpumask_var(mask);
21335 +	return err;
21336 +}
21337 diff --git a/litmus/domain.c b/litmus/domain.c
21338 new file mode 100644
21339 index 0000000..4dc3649
21340 --- /dev/null
21341 +++ b/litmus/domain.c
21342 @@ -0,0 +1,21 @@
21343 +#include <linux/list.h>
21344 +#include <linux/spinlock_types.h>
21345 +
21346 +#include <litmus/domain.h>
21347 +
21348 +void domain_init(domain_t *dom,
21349 +		 raw_spinlock_t *lock,
21350 +		 requeue_t requeue,
21351 +		 peek_ready_t peek_ready,
21352 +		 take_ready_t take_ready,
21353 +		 preempt_needed_t preempt_needed,
21354 +		 task_prio_t priority)
21355 +{
21356 +	INIT_LIST_HEAD(&dom->list);
21357 +	dom->lock = lock;
21358 +	dom->requeue = requeue;
21359 +	dom->peek_ready = peek_ready;
21360 +	dom->take_ready = take_ready;
21361 +	dom->preempt_needed = preempt_needed;
21362 +	dom->higher_prio = priority;
21363 +}
21364 diff --git a/litmus/edf_common.c b/litmus/edf_common.c
21365 index 06daec6..ad97798 100644
21366 --- a/litmus/edf_common.c
21367 +++ b/litmus/edf_common.c
21368 @@ -14,6 +14,35 @@
21369  
21370  #include <litmus/edf_common.h>
21371  
21372 +/* real-time comparison macros */
21373 +#define earlier_priority_point(a, b) (lt_before(\
21374 +	get_priority_point(a),\
21375 +	get_priority_point(b)))
21376 +
21377 +#define earlier_virt_priority_point(a, b) (lt_before(\
21378 +	get_virt_priority_point(a),\
21379 +	get_virt_priority_point(b)))
21380 +    
21381 +
21382 +inline lt_t get_priority_point(struct task_struct* t)
21383 +{
21384 +	unsigned int num_cpus = num_online_cpus();
21385 +	return t->rt_param.job_params.release +
21386 +           t->rt_param.task_params.period
21387 +	       - (((num_cpus - 1) * t->rt_param.task_params.exec_cost)
21388 +	       / (num_cpus));
21389 +}
21390 +
21391 +inline lt_t get_virt_priority_point(struct task_struct* t)
21392 +{
21393 +    return t->rt_param.job_params.virt_priority_point;
21394 +}
21395 +
21396 +inline lt_t get_act_priority_point(struct task_struct* t)
21397 +{
21398 +    return t->rt_param.job_params.act_priority_point;
21399 +}
21400 +
21401  /* edf_higher_prio -  returns true if first has a higher EDF priority
21402   *                    than second. Deadline ties are broken by PID.
21403   *
21404 @@ -33,22 +62,38 @@ int edf_higher_prio(struct task_struct* first,
21405  	}
21406  
21407  
21408 +	/* check for NULL tasks */
21409 +	if (!first || !second)
21410 +		return first && !second;
21411 +
21412 +#ifdef CONFIG_LITMUS_LOCKING
21413 +
21414  	/* Check for inherited priorities. Change task
21415  	 * used for comparison in such a case.
21416  	 */
21417 -	if (first && first->rt_param.inh_task)
21418 +	if (unlikely(first->rt_param.inh_task))
21419  		first_task = first->rt_param.inh_task;
21420 -	if (second && second->rt_param.inh_task)
21421 +	if (unlikely(second->rt_param.inh_task))
21422  		second_task = second->rt_param.inh_task;
21423  
21424 -	return
21425 -		/* it has to exist in order to have higher priority */
21426 -		first_task && (
21427 -		/* does the second task exist and is it a real-time task?  If
21428 -		 * not, the first task (which is a RT task) has higher
21429 -		 * priority.
21430 -		 */
21431 -		!second_task || !is_realtime(second_task)  ||
21432 +	/* Check for priority boosting. Tie-break by start of boosting.
21433 +	 */
21434 +	if (unlikely(is_priority_boosted(first_task))) {
21435 +		/* first_task is boosted, how about second_task? */
21436 +		if (!is_priority_boosted(second_task) ||
21437 +		    lt_before(get_boost_start(first_task),
21438 +			      get_boost_start(second_task)))
21439 +			return 1;
21440 +		else
21441 +			return 0;
21442 +	} else if (unlikely(is_priority_boosted(second_task)))
21443 +		/* second_task is boosted, first is not*/
21444 +		return 0;
21445 +
21446 +#endif
21447 +
21448 +
21449 +	return !is_realtime(second_task)  ||
21450  
21451  		/* is the deadline of the first task earlier?
21452  		 * Then it has higher priority.
21453 @@ -65,7 +110,76 @@ int edf_higher_prio(struct task_struct* first,
21454  		 * priority wins.
21455  		 */
21456  		(first_task->pid == second_task->pid &&
21457 -		 !second->rt_param.inh_task))));
21458 +		 !second->rt_param.inh_task)));
21459 +}
21460 +
21461 +int gel_higher_prio(struct task_struct* first,
21462 +		    struct task_struct* second)
21463 +{
21464 +	struct task_struct *first_task = first;
21465 +	struct task_struct *second_task = second;
21466 +
21467 +	/* There is no point in comparing a task to itself. */
21468 +	if (first && first == second) {
21469 +		TRACE_TASK(first,
21470 +			   "WARNING: pointless edf priority comparison.\n");
21471 +		return 0;
21472 +	}
21473 +
21474 +	/* check for NULL tasks */
21475 +	if (!first || !second)
21476 +		return first && !second;
21477 +
21478 +#ifdef CONFIG_LITMUS_LOCKING
21479 +
21480 +	/* Check for inherited priorities. Change task
21481 +	 * used for comparison in such a case.
21482 +	 */
21483 +	if (unlikely(first->rt_param.inh_task))
21484 +		first_task = first->rt_param.inh_task;
21485 +	if (unlikely(second->rt_param.inh_task))
21486 +		second_task = second->rt_param.inh_task;
21487 +
21488 +	/* Check for priority boosting. Tie-break by start of boosting.
21489 +	 */
21490 +	if (unlikely(is_priority_boosted(first_task))) {
21491 +		/* first_task is boosted, how about second_task? */
21492 +		if (!is_priority_boosted(second_task) ||
21493 +		    lt_before(get_boost_start(first_task),
21494 +			      get_boost_start(second_task)))
21495 +			return 1;
21496 +		else
21497 +			return 0;
21498 +	} else if (unlikely(is_priority_boosted(second_task)))
21499 +		/* second_task is boosted, first is not*/
21500 +		return 0;
21501 +
21502 +#endif
21503 +
21504 +
21505 +	return !is_realtime(second_task)  ||
21506 +
21507 +		/* is the virtual priority point of the first task earlier?
21508 +		 * Then it has higher priority.
21509 +		 */
21510 +		earlier_virt_priority_point(first_task, second_task) ||
21511 +
21512 +		/* Do we have a priority-point tie?
21513 +		 * Then break by PID.
21514 +		 */
21515 +		(get_virt_priority_point(first_task) == get_virt_priority_point(second_task) &&
21516 +	        (first_task->pid < second_task->pid ||
21517 +
21518 +		/* If the PIDs are the same then the task with the inherited
21519 +		 * priority wins.
21520 +		 */
21521 +		(first_task->pid == second_task->pid &&
21522 +		 !second->rt_param.inh_task)));
21523 +}
21524 +
21525 +int gel_ready_order(struct bheap_node* a, struct bheap_node* b)
21526 +{
21527 +	return gel_higher_prio(bheap2task(a), bheap2task(b));
21528  }
21529  
21530  int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
21531 @@ -99,4 +213,4 @@ int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
21532  
21533  	/* make sure to get non-rt stuff out of the way */
21534  	return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t);
21535 -}
21536 +}
21537 \ No newline at end of file
21538 diff --git a/litmus/event_group.c b/litmus/event_group.c
21539 new file mode 100644
21540 index 0000000..478698a
21541 --- /dev/null
21542 +++ b/litmus/event_group.c
21543 @@ -0,0 +1,334 @@
21544 +#include <linux/slab.h>
21545 +#include <linux/sched.h>
21546 +#include <linux/module.h>
21547 +
21548 +#include <litmus/litmus.h>
21549 +#include <litmus/trace.h>
21550 +#include <litmus/sched_trace.h>
21551 +#include <litmus/event_group.h>
21552 +
21553 +#if 1
21554 +#define VTRACE(fmt, args...)					\
21555 +sched_trace_log_message("%d P%d        [%s@%s:%d]: " fmt,	\
21556 +				TRACE_ARGS,  ## args)
21557 +#else
21558 +#define VTRACE(fmt, args...)
21559 +#endif
21560 +
21561 +/*
21562 + * Return event_queue slot for the given time.
21563 + */
21564 +static unsigned int time2slot(lt_t time)
21565 +{
21566 +	return (unsigned int) time2quanta(time, FLOOR) % EVENT_QUEUE_SLOTS;
21567 +}
21568 +
21569 +/*
21570 + * Executes events from an event_list in priority order.
21571 + * Events can requeue themselves when they are called.
21572 + */
21573 +static enum hrtimer_restart on_timer(struct hrtimer *timer)
21574 +{
21575 +	int prio, num;
21576 +	unsigned long flags;
21577 +	struct event_list *el;
21578 +	struct rt_event *e;
21579 +	struct list_head *pos, events[NUM_EVENT_PRIORITIES];
21580 +	raw_spinlock_t *queue_lock;
21581 +
21582 +	el = container_of(timer, struct event_list, timer);
21583 +	queue_lock = &el->group->queue_lock;
21584 +
21585 +	raw_spin_lock_irqsave(queue_lock, flags);
21586 +
21587 +	/* Remove event_list from hashtable so that no more events
21588 +	 * are added to it.
21589 +	 */
21590 +	VTRACE("Removing event list 0x%x\n", el);
21591 +	list_del_init(&el->queue_node);
21592 +
21593 +	/* Copy over events so that the event_list can be re-used when the lock
21594 +	 * is released.
21595 +	 */
21596 +	VTRACE("Emptying event list 0x%x\n", el);
21597 +	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++) {
21598 +		list_replace_init(&el->events[prio], &events[prio]);
21599 +	}
21600 +
21601 +	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++) {
21602 +		/* Fire events. Complicated loop is used so that events
21603 +		 * in the list can be canceled (removed) while other events are
21604 +		 * executing.
21605 +		 */
21606 +		for (pos = events[prio].next, num = 0;
21607 +		     prefetch(pos->next), events[prio].next != &events[prio];
21608 +		     pos = events[prio].next, num++) {
21609 +
21610 +			e = list_entry(pos, struct rt_event, events_node);
21611 +			list_del_init(pos);
21612 +			raw_spin_unlock_irqrestore(queue_lock, flags);
21613 +
21614 +			VTRACE("Dequeueing event 0x%x with prio %d from 0x%x\n",
21615 +			       e, e->prio, el);
21616 +			e->function(e);
21617 +
21618 +			raw_spin_lock_irqsave(queue_lock, flags);
21619 +		}
21620 +	}
21621 +	raw_spin_unlock_irqrestore(queue_lock, flags);
21622 +
21623 +	VTRACE("Exhausted %d events from list 0x%x\n", num, el);
21624 +
21625 +	return HRTIMER_NORESTART;
21626 +}
21627 +
21628 +/*
21629 + * Return event_list for the given event and time. If no event_list
21630 + * is being used yet and use_event_heap is 1, will create the list
21631 + * and return it. Otherwise it will return NULL.
21632 + */
21633 +static struct event_list* get_event_list(struct event_group *group,
21634 +					 struct rt_event *e,
21635 +					 lt_t fire,
21636 +					 int use_event_list)
21637 +{
21638 +	struct list_head* pos;
21639 +	struct event_list *el = NULL, *tmp;
21640 +	unsigned int slot = time2slot(fire);
21641 +	int remaining = 300;
21642 +
21643 +	VTRACE("Getting list for time %llu, event 0x%x\n", fire, e);
21644 +
21645 +	/* Initialize pos for the case that the list is empty */
21646 +	pos = group->event_queue[slot].next;
21647 +	list_for_each(pos, &group->event_queue[slot]) {
21648 +		BUG_ON(remaining-- < 0);
21649 +		tmp = list_entry(pos, struct event_list, queue_node);
21650 +		if (lt_after_eq(fire, tmp->fire_time) &&
21651 +		    lt_before(fire, tmp->fire_time + group->res)) {
21652 +			VTRACE("Found match 0x%x at time %llu\n",
21653 +			       tmp, tmp->fire_time);
21654 +			el = tmp;
21655 +			break;
21656 +		} else if (lt_before(fire, tmp->fire_time)) {
21657 +			/* We need to insert a new node since el is
21658 +			 * already in the future
21659 +			 */
21660 +			VTRACE("Time %llu was before %llu\n",
21661 +			       fire, tmp->fire_time);
21662 +			break;
21663 +		} else {
21664 +			VTRACE("Time %llu was after %llu\n",
21665 +			       fire, tmp->fire_time + group->res);
21666 +		}
21667 +	}
21668 +	if (!el && use_event_list) {
21669 +		/* Use pre-allocated list */
21670 +		tmp = e->event_list;
21671 +		tmp->fire_time = fire;
21672 +		tmp->group = group;
21673 +		/* Add to queue */
21674 +		VTRACE("Using list 0x%x for priority %d and time %llu\n",
21675 +		       tmp, e->prio, fire);
21676 +		BUG_ON(!list_empty(&tmp->queue_node));
21677 +		list_add(&tmp->queue_node, pos->prev);
21678 +		el = tmp;
21679 +	}
21680 +	return el;
21681 +}
21682 +
21683 +/*
21684 + * Prepare a release list for a new set of events.
21685 + */
21686 +static void reinit_event_list(struct event_group *group, struct rt_event *e)
21687 +{
21688 +	int prio, t_ret;
21689 +	struct event_list *el = e->event_list;
21690 +
21691 +	VTRACE("Reinitting list 0x%x for event 0x%x\n", el, e);
21692 +
21693 +	/* Cancel timer */
21694 +	t_ret = hrtimer_pull_cancel(group->cpu, &el->timer, &el->info);
21695 +	BUG_ON(t_ret == 1);
21696 +	if (t_ret == -1) {
21697 +		/* The on_timer callback is running for this list */
21698 +		VTRACE("Timer is running concurrently!\n");
21699 +	}
21700 +	/* Clear event lists */
21701 +	for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++)
21702 +		INIT_LIST_HEAD(&el->events[prio]);
21703 +}
21704 +
21705 +/**
21706 + * add_event() - Add timer to event group.
21707 + */
21708 +void add_event(struct event_group *group, struct rt_event *e, lt_t fire)
21709 +{
21710 +	struct event_list *el;
21711 +	int in_use;
21712 +
21713 +	VTRACE("Adding event 0x%x with priority %d for time %llu\n",
21714 +	       e, e->prio, fire);
21715 +
21716 +	/* A NULL group means use the group of the currently executing CPU  */
21717 +	if (NULL == group)
21718 +		group = get_event_group_for(NO_CPU);
21719 +	/* Saving the group is important for cancellations */
21720 +	e->_event_group = group;
21721 +
21722 +	raw_spin_lock(&group->queue_lock);
21723 +	el = get_event_list(group, e, fire, 0);
21724 +	if (!el) {
21725 +		/* Use our own, but drop lock first */
21726 +		raw_spin_unlock(&group->queue_lock);
21727 +		reinit_event_list(group, e);
21728 +		raw_spin_lock(&group->queue_lock);
21729 +		el = get_event_list(group, e, fire, 1);
21730 +	}
21731 +
21732 +	/* Add event to sorted list */
21733 +	VTRACE("Inserting event 0x%x at end of event_list 0x%x\n", e, el);
21734 +	list_add(&e->events_node, &el->events[e->prio]);
21735 +	raw_spin_unlock(&group->queue_lock);
21736 +
21737 +	/* Arm timer if we are the owner */
21738 +	if (el == e->event_list) {
21739 +		VTRACE("Arming timer on event 0x%x for %llu\n", e, fire);
21740 +		in_use = hrtimer_start_on(group->cpu, &el->info,
21741 +					  &el->timer, ns_to_ktime(el->fire_time),
21742 +					  HRTIMER_MODE_ABS_PINNED);
21743 +		BUG_ON(in_use);
21744 +	} else {
21745 +		VTRACE("Not my timer @%llu\n", fire);
21746 +	}
21747 +}
21748 +
21749 +/**
21750 + * cancel_event() - Remove event from the group.
21751 + */
21752 +void cancel_event(struct rt_event *e)
21753 +{
21754 +	int prio, cancel;
21755 +	struct rt_event *swap, *entry;
21756 +	struct event_list *tmp;
21757 +	struct event_group *group;
21758 +	struct list_head *list, *pos;
21759 +
21760 +	VTRACE("Canceling event 0x%x with priority %d\n", e, e->prio);
21761 +	group = e->_event_group;
21762 +	if (!group) return;
21763 +
21764 +	raw_spin_lock(&group->queue_lock);
21765 +
21766 +	/* Relies on the fact that an event_list's owner is ALWAYS present
21767 +	 * as one of the event_list's events.
21768 +	 */
21769 +	for (prio = 0, cancel = 0, swap = NULL;
21770 +	     prio < NUM_EVENT_PRIORITIES && !swap;
21771 +	     prio++) {
21772 +
21773 +		list = &e->event_list->events[prio];
21774 +		cancel |= !list_empty(list);
21775 +
21776 +		/* Find any element which is not the event_list's owner */
21777 +		list_for_each(pos, list) {
21778 +			entry = list_entry(pos, struct rt_event, events_node);
21779 +			if (entry != e) {
21780 +				swap = entry;
21781 +				break;
21782 +			}
21783 +		}
21784 +	}
21785 +
21786 +	if (swap) {
21787 +		/* Give the other guy ownership of the event_list */
21788 +		VTRACE("Swapping list 0x%x with event 0x%x event list 0x%x\n",
21789 +		       e->event_list, swap, swap->event_list);
21790 +		tmp = swap->event_list;
21791 +		swap->event_list = e->event_list;
21792 +		BUG_ON(!tmp);
21793 +		e->event_list = tmp;
21794 +	} else if (cancel) {
21795 +		/* Cancel the event_list we own */
21796 +		hrtimer_pull_cancel(group->cpu,
21797 +				    &e->event_list->timer,
21798 +				    &e->event_list->info);
21799 +		list_del_init(&e->event_list->queue_node);
21800 +	}
21801 +	/* Remove ourselves from any list we may be a part of */
21802 +	list_del_init(&e->events_node);
21803 +	e->_event_group = NULL;
21804 +
21805 +	raw_spin_unlock(&group->queue_lock);
21806 +}
21807 +
21808 +struct kmem_cache *event_list_cache;
21809 +
21810 +struct event_list* event_list_alloc(int gfp_flags)
21811 +{
21812 +	int prio;
21813 +	struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags);
21814 +	if (el) {
21815 +		hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
21816 +		INIT_LIST_HEAD(&el->queue_node);
21817 +		el->timer.function = on_timer;
21818 +		hrtimer_start_on_info_init(&el->info);
21819 +		for (prio = 0; prio < NUM_EVENT_PRIORITIES; prio++)
21820 +			INIT_LIST_HEAD(&el->events[prio]);
21821 +	} else {
21822 +		VTRACE("Failed to allocate event list!\n");
21823 +		printk(KERN_CRIT "Failed to allocate event list.\n");
21824 +		BUG();
21825 +	}
21826 +	return el;
21827 +}
21828 +
21829 +void init_event(struct rt_event *e, int prio, fire_event_t function,
21830 +		struct event_list *el)
21831 +{
21832 +	e->prio = prio;
21833 +	e->function = function;
21834 +	e->event_list = el;
21835 +	e->_event_group = NULL;
21836 +	INIT_LIST_HEAD(&e->events_node);
21837 +}
21838 +
21839 +/**
21840 + * init_event_group() - Prepare group for events.
21841 + * @group	Group to prepare
21842 + * @res		Timer resolution; two events within @res of each other are merged
21843 + * @cpu		CPU on which to fire timers
21844 + */
21845 +static void init_event_group(struct event_group *group, lt_t res, int cpu)
21846 +{
21847 +	int i;
21848 +	VTRACE("Creating group with resolution %llu on CPU %d", res, cpu);
21849 +	group->res = res;
21850 +	group->cpu = cpu;
21851 +	for (i = 0; i < EVENT_QUEUE_SLOTS; i++)
21852 +		INIT_LIST_HEAD(&group->event_queue[i]);
21853 +	raw_spin_lock_init(&group->queue_lock);
21854 +}
21855 +
21856 +
21857 +DEFINE_PER_CPU(struct event_group, _event_groups);
21858 +
21859 +struct event_group *get_event_group_for(const int cpu)
21860 +{
21861 +	return &per_cpu(_event_groups,
21862 +			(NO_CPU == cpu) ? smp_processor_id() : cpu);
21863 +}
21864 +
21865 +static int __init _init_event_groups(void)
21866 +{
21867 +	int cpu;
21868 +	printk("Initializing LITMUS^RT event groups.\n");
21869 +
21870 +	for_each_online_cpu(cpu) {
21871 +		init_event_group(get_event_group_for(cpu),
21872 +				CONFIG_MERGE_TIMERS_WINDOW, cpu);
21873 +	}
21874 +	return 0;
21875 +}
21876 +
21877 +module_init(_init_event_groups);
21878 diff --git a/litmus/fdso.c b/litmus/fdso.c
21879 index 85be716..aa7b384 100644
21880 --- a/litmus/fdso.c
21881 +++ b/litmus/fdso.c
21882 @@ -18,25 +18,24 @@
21883  
21884  #include <litmus/fdso.h>
21885  
21886 -extern struct fdso_ops fmlp_sem_ops;
21887 -extern struct fdso_ops srp_sem_ops;
21888 +extern struct fdso_ops generic_lock_ops;
21889  
21890  static const struct fdso_ops* fdso_ops[] = {
21891 -	&fmlp_sem_ops,
21892 -	&srp_sem_ops,
21893 +	&generic_lock_ops, /* FMLP_SEM */
21894 +	&generic_lock_ops, /* SRP_SEM */
21895  };
21896  
21897 -static void* fdso_create(obj_type_t type)
21898 +static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
21899  {
21900  	if (fdso_ops[type]->create)
21901 -		return fdso_ops[type]->create();
21902 +		return fdso_ops[type]->create(obj_ref, type, config);
21903  	else
21904 -		return NULL;
21905 +		return -EINVAL;
21906  }
21907  
21908  static void fdso_destroy(obj_type_t type, void* obj)
21909  {
21910 -	fdso_ops[type]->destroy(obj);
21911 +	fdso_ops[type]->destroy(type, obj);
21912  }
21913  
21914  static int fdso_open(struct od_table_entry* entry, void* __user config)
21915 @@ -56,20 +55,27 @@ static int fdso_close(struct od_table_entry* entry)
21916  }
21917  
21918  /* inode must be locked already */
21919 -static struct inode_obj_id* alloc_inode_obj(struct inode* inode,
21920 -					    obj_type_t type,
21921 -					    unsigned int id)
21922 +static int alloc_inode_obj(struct inode_obj_id** obj_ref,
21923 +			   struct inode* inode,
21924 +			   obj_type_t type,
21925 +			   unsigned int id,
21926 +			   void* __user config)
21927  {
21928  	struct inode_obj_id* obj;
21929  	void* raw_obj;
21930 -
21931 -	raw_obj = fdso_create(type);
21932 -	if (!raw_obj)
21933 -		return NULL;
21934 +	int err;
21935  
21936  	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
21937 -	if (!obj)
21938 -		return NULL;
21939 +	if (!obj) {
21940 +		return -ENOMEM;
21941 +	}
21942 +
21943 +	err = fdso_create(&raw_obj, type, config);
21944 +	if (err != 0) {
21945 +		kfree(obj);
21946 +		return err;
21947 +	}
21948 +
21949  	INIT_LIST_HEAD(&obj->list);
21950  	atomic_set(&obj->count, 1);
21951  	obj->type  = type;
21952 @@ -81,7 +87,9 @@ static struct inode_obj_id* alloc_inode_obj(struct inode* inode,
21953  	atomic_inc(&inode->i_count);
21954  
21955  	printk(KERN_DEBUG "alloc_inode_obj(%p, %d, %d): object created\n", inode, type, id);
21956 -	return obj;
21957 +
21958 +	*obj_ref = obj;
21959 +	return 0;
21960  }
21961  
21962  /* inode must be locked already */
21963 @@ -170,7 +178,7 @@ void exit_od_table(struct task_struct* t)
21964  static int do_sys_od_open(struct file* file, obj_type_t type, int id,
21965  			  void* __user config)
21966  {
21967 -	int idx = 0, err;
21968 +	int idx = 0, err = 0;
21969  	struct inode* inode;
21970  	struct inode_obj_id* obj = NULL;
21971  	struct od_table_entry* entry;
21972 @@ -184,24 +192,28 @@ static int do_sys_od_open(struct file* file, obj_type_t type, int id,
21973  	mutex_lock(&inode->i_obj_mutex);
21974  	obj = get_inode_obj(inode, type, id);
21975  	if (!obj)
21976 -		obj = alloc_inode_obj(inode, type, id);
21977 -	if (!obj) {
21978 -		idx = -ENOMEM;
21979 +		err = alloc_inode_obj(&obj, inode, type, id, config);
21980 +	if (err != 0) {
21981 +		obj = NULL;
21982 +		idx = err;
21983  		entry->used = 0;
21984  	} else {
21985  		entry->obj   = obj;
21986 -		entry->extra = NULL;
21987 +		entry->class = fdso_ops[type];
21988  		idx = entry - current->od_table;
21989  	}
21990  
21991  	mutex_unlock(&inode->i_obj_mutex);
21992  
21993 -	err = fdso_open(entry, config);
21994 +	/* open only if creation succeeded */
21995 +	if (!err)
21996 +		err = fdso_open(entry, config);
21997  	if (err < 0) {
21998  		/* The class rejected the open call.
21999  		 * We need to clean up and tell user space.
22000  		 */
22001 -		put_od_entry(entry);
22002 +		if (obj)
22003 +			put_od_entry(entry);
22004  		idx = err;
22005  	}
22006  
22007 @@ -209,7 +221,7 @@ static int do_sys_od_open(struct file* file, obj_type_t type, int id,
22008  }
22009  
22010  
22011 -struct od_table_entry* __od_lookup(int od)
22012 +struct od_table_entry* get_entry_for_od(int od)
22013  {
22014  	struct task_struct *t = current;
22015  
22016 diff --git a/litmus/fmlp.c b/litmus/fmlp.c
22017 deleted file mode 100644
22018 index a9a6385..0000000
22019 --- a/litmus/fmlp.c
22020 +++ /dev/null
22021 @@ -1,268 +0,0 @@
22022 -/*
22023 - * FMLP implementation.
22024 - * Much of the code here is borrowed from include/asm-i386/semaphore.h
22025 - */
22026 -
22027 -#include <asm/atomic.h>
22028 -
22029 -#include <linux/semaphore.h>
22030 -#include <linux/sched.h>
22031 -#include <linux/wait.h>
22032 -#include <linux/spinlock.h>
22033 -
22034 -#include <litmus/litmus.h>
22035 -#include <litmus/sched_plugin.h>
22036 -#include <litmus/edf_common.h>
22037 -
22038 -#include <litmus/fdso.h>
22039 -
22040 -#include <litmus/trace.h>
22041 -
22042 -#ifdef CONFIG_FMLP
22043 -
22044 -static  void* create_fmlp_semaphore(void)
22045 -{
22046 -	struct pi_semaphore* sem;
22047 -	int i;
22048 -
22049 -	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
22050 -	if (!sem)
22051 -		return NULL;
22052 -	atomic_set(&sem->count, 1);
22053 -	sem->sleepers = 0;
22054 -	init_waitqueue_head(&sem->wait);
22055 -	sem->hp.task = NULL;
22056 -	sem->holder = NULL;
22057 -	for (i = 0; i < NR_CPUS; i++)
22058 -		sem->hp.cpu_task[i] = NULL;
22059 -	return sem;
22060 -}
22061 -
22062 -static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg)
22063 -{
22064 -	if (!fmlp_active())
22065 -		return -EBUSY;
22066 -	return 0;
22067 -}
22068 -
22069 -static void destroy_fmlp_semaphore(void* sem)
22070 -{
22071 -	/* XXX assert invariants */
22072 -	kfree(sem);
22073 -}
22074 -
22075 -struct fdso_ops fmlp_sem_ops = {
22076 -	.create  = create_fmlp_semaphore,
22077 -	.open    = open_fmlp_semaphore,
22078 -	.destroy = destroy_fmlp_semaphore
22079 -};
22080 -
22081 -struct wq_pair {
22082 -	struct task_struct*  tsk;
22083 -	struct pi_semaphore* sem;
22084 -};
22085 -
22086 -static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
22087 -			   void *key)
22088 -{
22089 -	struct wq_pair* wqp   = (struct wq_pair*) wait->private;
22090 -	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
22091 -	litmus->inherit_priority(wqp->sem, wqp->tsk);
22092 -	TRACE_TASK(wqp->tsk,
22093 -		   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
22094 -	/* point to task for default_wake_function() */
22095 -	wait->private = wqp->tsk;
22096 -	default_wake_function(wait, mode, sync, key);
22097 -
22098 -	/* Always return true since we know that if we encountered a task
22099 -	 * that was already running the wake_up raced with the schedule in
22100 -	 * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled
22101 -	 * immediately and own the lock. We must not wake up another task in
22102 -	 * any case.
22103 -	 */
22104 -	return 1;
22105 -}
22106 -
22107 -/* caller is responsible for locking */
22108 -int edf_set_hp_task(struct pi_semaphore *sem)
22109 -{
22110 -	struct list_head	*tmp, *next;
22111 -	struct task_struct 	*queued;
22112 -	int ret = 0;
22113 -
22114 -	sem->hp.task = NULL;
22115 -	list_for_each_safe(tmp, next, &sem->wait.task_list) {
22116 -		queued  = ((struct wq_pair*)
22117 -			list_entry(tmp, wait_queue_t,
22118 -				   task_list)->private)->tsk;
22119 -
22120 -		/* Compare task prios, find high prio task. */
22121 -		if (edf_higher_prio(queued, sem->hp.task)) {
22122 -			sem->hp.task = queued;
22123 -			ret = 1;
22124 -		}
22125 -	}
22126 -	return ret;
22127 -}
22128 -
22129 -/* caller is responsible for locking */
22130 -int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
22131 -{
22132 -	struct list_head	*tmp, *next;
22133 -	struct task_struct 	*queued;
22134 -	int ret = 0;
22135 -
22136 -	sem->hp.cpu_task[cpu] = NULL;
22137 -	list_for_each_safe(tmp, next, &sem->wait.task_list) {
22138 -		queued  = ((struct wq_pair*)
22139 -			list_entry(tmp, wait_queue_t,
22140 -				   task_list)->private)->tsk;
22141 -
22142 -		/* Compare task prios, find high prio task. */
22143 -		if (get_partition(queued) == cpu &&
22144 -		    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
22145 -			sem->hp.cpu_task[cpu] = queued;
22146 -			ret = 1;
22147 -		}
22148 -	}
22149 -	return ret;
22150 -}
22151 -
22152 -static int do_fmlp_down(struct pi_semaphore* sem)
22153 -{
22154 -	unsigned long flags;
22155 -	struct task_struct *tsk = current;
22156 -	struct wq_pair pair;
22157 -	int suspended = 1;
22158 -	wait_queue_t wait = {
22159 -		.private = &pair,
22160 -		.func    = rt_pi_wake_up,
22161 -		.task_list = {NULL, NULL}
22162 -	};
22163 -
22164 -	pair.tsk = tsk;
22165 -	pair.sem = sem;
22166 -	spin_lock_irqsave(&sem->wait.lock, flags);
22167 -
22168 -	if (atomic_dec_return(&sem->count) < 0 ||
22169 -	    waitqueue_active(&sem->wait)) {
22170 -		/* we need to suspend */
22171 -		tsk->state = TASK_UNINTERRUPTIBLE;
22172 -		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
22173 -
22174 -		TRACE_CUR("suspends on PI lock %p\n", sem);
22175 -		litmus->pi_block(sem, tsk);
22176 -
22177 -		/* release lock before sleeping */
22178 -		spin_unlock_irqrestore(&sem->wait.lock, flags);
22179 -
22180 -		TS_PI_DOWN_END;
22181 -		preempt_enable_no_resched();
22182 -
22183 -
22184 -		/* we depend on the FIFO order
22185 -		 * Thus, we don't need to recheck when we wake up, we
22186 -		 * are guaranteed to have the lock since there is only one
22187 -		 * wake up per release
22188 -		 */
22189 -		schedule();
22190 -
22191 -		TRACE_CUR("woke up, now owns PI lock %p\n", sem);
22192 -
22193 -		/* try_to_wake_up() set our state to TASK_RUNNING,
22194 -		 * all we need to do is to remove our wait queue entry
22195 -		 */
22196 -		remove_wait_queue(&sem->wait, &wait);
22197 -	} else {
22198 -		/* no priority inheritance necessary, since there are no queued
22199 -		 * tasks.
22200 -		 */
22201 -		suspended = 0;
22202 -		TRACE_CUR("acquired PI lock %p, no contention\n", sem);
22203 -		sem->holder  = tsk;
22204 -
22205 -		/* don't know if we're global or partitioned. */
22206 -		sem->hp.task = tsk;
22207 -		sem->hp.cpu_task[get_partition(tsk)] = tsk;
22208 -
22209 -		litmus->inherit_priority(sem, tsk);
22210 -		spin_unlock_irqrestore(&sem->wait.lock, flags);
22211 -	}
22212 -	return suspended;
22213 -}
22214 -
22215 -static void do_fmlp_up(struct pi_semaphore* sem)
22216 -{
22217 -	unsigned long flags;
22218 -
22219 -	spin_lock_irqsave(&sem->wait.lock, flags);
22220 -
22221 -	TRACE_CUR("releases PI lock %p\n", sem);
22222 -	litmus->return_priority(sem);
22223 -	sem->holder = NULL;
22224 -	if (atomic_inc_return(&sem->count) < 1)
22225 -		/* there is a task queued */
22226 -		wake_up_locked(&sem->wait);
22227 -
22228 -	spin_unlock_irqrestore(&sem->wait.lock, flags);
22229 -}
22230 -
22231 -asmlinkage long sys_fmlp_down(int sem_od)
22232 -{
22233 -	long ret = 0;
22234 -	struct pi_semaphore * sem;
22235 -	int suspended = 0;
22236 -
22237 -	preempt_disable();
22238 -	TS_PI_DOWN_START;
22239 -
22240 -	sem = lookup_fmlp_sem(sem_od);
22241 -	if (sem)
22242 -		suspended = do_fmlp_down(sem);
22243 -	else
22244 -		ret = -EINVAL;
22245 -
22246 -	if (!suspended) {
22247 -		TS_PI_DOWN_END;
22248 -		preempt_enable();
22249 -	}
22250 -
22251 -	return ret;
22252 -}
22253 -
22254 -asmlinkage long sys_fmlp_up(int sem_od)
22255 -{
22256 -	long ret = 0;
22257 -	struct pi_semaphore * sem;
22258 -
22259 -	preempt_disable();
22260 -	TS_PI_UP_START;
22261 -
22262 -	sem = lookup_fmlp_sem(sem_od);
22263 -	if (sem)
22264 -		do_fmlp_up(sem);
22265 -	else
22266 -		ret = -EINVAL;
22267 -
22268 -
22269 -	TS_PI_UP_END;
22270 -	preempt_enable();
22271 -
22272 -	return ret;
22273 -}
22274 -
22275 -#else
22276 -
22277 -struct fdso_ops fmlp_sem_ops = {};
22278 -
22279 -asmlinkage long sys_fmlp_down(int sem_od)
22280 -{
22281 -	return -ENOSYS;
22282 -}
22283 -
22284 -asmlinkage long sys_fmlp_up(int sem_od)
22285 -{
22286 -	return -ENOSYS;
22287 -}
22288 -
22289 -#endif
22290 diff --git a/litmus/ftdev.c b/litmus/ftdev.c
22291 index 4a4b2e3..e282f8a 100644
22292 --- a/litmus/ftdev.c
22293 +++ b/litmus/ftdev.c
22294 @@ -229,13 +229,20 @@ static ssize_t ftdev_read(struct file *filp,
22295  			 * here with copied data because that data would get
22296  			 * lost if the task is interrupted (e.g., killed).
22297  			 */
22298 +			mutex_unlock(&ftdm->lock);
22299  			set_current_state(TASK_INTERRUPTIBLE);
22300 +
22301  			schedule_timeout(50);
22302 +
22303  			if (signal_pending(current)) {
22304  				if (err == 0)
22305  					/* nothing read yet, signal problem */
22306  					err = -ERESTARTSYS;
22307 -				break;
22308 +				goto out;
22309 +			}
22310 +			if (mutex_lock_interruptible(&ftdm->lock)) {
22311 +				err = -ERESTARTSYS;
22312 +				goto out;
22313  			}
22314  		} else if (copied < 0) {
22315  			/* page fault */
22316 @@ -250,64 +257,47 @@ out:
22317  	return err;
22318  }
22319  
22320 -typedef uint32_t cmd_t;
22321 -
22322 -static ssize_t ftdev_write(struct file *filp, const char __user *from,
22323 -			   size_t len, loff_t *f_pos)
22324 +static long ftdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
22325  {
22326 +	long err = -ENOIOCTLCMD;
22327  	struct ftdev_minor* ftdm = filp->private_data;
22328 -	ssize_t err = -EINVAL;
22329 -	cmd_t cmd;
22330 -	cmd_t id;
22331 -
22332 -	if (len % sizeof(cmd) || len < 2 * sizeof(cmd))
22333 -		goto out;
22334 -
22335 -	if (copy_from_user(&cmd, from, sizeof(cmd))) {
22336 -		err = -EFAULT;
22337 -	        goto out;
22338 -	}
22339 -	len  -= sizeof(cmd);
22340 -	from += sizeof(cmd);
22341 -
22342 -	if (cmd != FTDEV_ENABLE_CMD && cmd != FTDEV_DISABLE_CMD)
22343 -		goto out;
22344  
22345  	if (mutex_lock_interruptible(&ftdm->lock)) {
22346  		err = -ERESTARTSYS;
22347  		goto out;
22348  	}
22349  
22350 -	err = sizeof(cmd);
22351 -	while (len) {
22352 -		if (copy_from_user(&id, from, sizeof(cmd))) {
22353 -			err = -EFAULT;
22354 -			goto out_unlock;
22355 -		}
22356 -		/* FIXME: check id against list of acceptable events */
22357 -		len  -= sizeof(cmd);
22358 -		from += sizeof(cmd);
22359 -		if (cmd == FTDEV_DISABLE_CMD)
22360 -			deactivate(&ftdm->events, id);
22361 -		else if (activate(&ftdm->events, id) != 0) {
22362 +	/* FIXME: check id against list of acceptable events */
22363 +
22364 +	switch (cmd) {
22365 +	case  FTDEV_ENABLE_CMD:
22366 +		if (activate(&ftdm->events, arg))
22367  			err = -ENOMEM;
22368 -			goto out_unlock;
22369 -		}
22370 -		err += sizeof(cmd);
22371 -	}
22372 +		else
22373 +			err = 0;
22374 +		break;
22375 +
22376 +	case FTDEV_DISABLE_CMD:
22377 +		deactivate(&ftdm->events, arg);
22378 +		err = 0;
22379 +		break;
22380 +
22381 +	default:
22382 +		printk(KERN_DEBUG "ftdev: strange ioctl (%u, %lu)\n", cmd, arg);
22383 +	};
22384  
22385 -out_unlock:
22386  	mutex_unlock(&ftdm->lock);
22387  out:
22388  	return err;
22389  }
22390  
22391 +
22392  struct file_operations ftdev_fops = {
22393  	.owner   = THIS_MODULE,
22394  	.open    = ftdev_open,
22395  	.release = ftdev_release,
22396 -	.write   = ftdev_write,
22397  	.read    = ftdev_read,
22398 +	.unlocked_ioctl = ftdev_ioctl,
22399  };
22400  
22401  int ftdev_init(	struct ftdev* ftdev, struct module* owner,
22402 diff --git a/litmus/jobs.c b/litmus/jobs.c
22403 index 36e3146..10a42db 100644
22404 --- a/litmus/jobs.c
22405 +++ b/litmus/jobs.c
22406 @@ -10,8 +10,11 @@ void prepare_for_next_period(struct task_struct *t)
22407  {
22408  	BUG_ON(!t);
22409  	/* prepare next release */
22410 +
22411  	t->rt_param.job_params.release   = t->rt_param.job_params.deadline;
22412 +	t->rt_param.job_params.real_release =  t->rt_param.job_params.release;
22413  	t->rt_param.job_params.deadline += get_rt_period(t);
22414 +	t->rt_param.job_params.real_deadline = t->rt_param.job_params.deadline;
22415  	t->rt_param.job_params.exec_time = 0;
22416  	/* update job sequence number */
22417  	t->rt_param.job_params.job_no++;
22418 diff --git a/litmus/litmus.c b/litmus/litmus.c
22419 index 8efd3f9..f59b419 100644
22420 --- a/litmus/litmus.c
22421 +++ b/litmus/litmus.c
22422 @@ -13,10 +13,19 @@
22423  #include <litmus/litmus.h>
22424  #include <litmus/bheap.h>
22425  #include <litmus/trace.h>
22426 +#include <litmus/event_group.h>
22427  #include <litmus/rt_domain.h>
22428  #include <litmus/litmus_proc.h>
22429  #include <litmus/sched_trace.h>
22430  
22431 +#ifdef CONFIG_PLUGIN_MC
22432 +#include <linux/pid.h>
22433 +#include <linux/hrtimer.h>
22434 +#include <litmus/sched_mc.h>
22435 +#else
22436 +struct mc_task;
22437 +#endif
22438 +
22439  /* Number of RT tasks that exist in the system */
22440  atomic_t rt_task_count 		= ATOMIC_INIT(0);
22441  static DEFINE_RAW_SPINLOCK(task_transition_lock);
22442 @@ -31,8 +40,16 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
22443  atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
22444  #endif
22445  
22446 -static struct kmem_cache * bheap_node_cache;
22447 -extern struct kmem_cache * release_heap_cache;
22448 +static struct kmem_cache *bheap_node_cache;
22449 +extern struct kmem_cache *release_heap_cache;
22450 +
22451 +#ifdef CONFIG_MERGE_TIMERS
22452 +extern struct kmem_cache *event_list_cache;
22453 +#endif
22454 +
22455 +#ifdef CONFIG_PLUGIN_MC
22456 +static struct kmem_cache *mc_data_cache;
22457 +#endif
22458  
22459  struct bheap_node* bheap_node_alloc(int gfp_flags)
22460  {
22461 @@ -44,7 +61,11 @@ void bheap_node_free(struct bheap_node* hn)
22462  	kmem_cache_free(bheap_node_cache, hn);
22463  }
22464  
22465 +#ifdef CONFIG_PLUGIN_MC
22466 +struct release_heap* release_heap_alloc(int gfp_flags, enum crit_level);
22467 +#else
22468  struct release_heap* release_heap_alloc(int gfp_flags);
22469 +#endif
22470  void release_heap_free(struct release_heap* rh);
22471  
22472  /*
22473 @@ -274,6 +295,105 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
22474  	return ret;
22475  }
22476  
22477 +#ifdef CONFIG_PLUGIN_MC
22478 +asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
22479 +{
22480 +	struct mc_task mc;
22481 +	struct mc_data *mc_data;
22482 +	struct task_struct *target;
22483 +	int retval = -EINVAL;
22484 +
22485 +	printk("Setting up mixed-criticality task parameters for process %d.\n",
22486 +		pid);
22487 +
22488 +	if (pid < 0 || param == 0) {
22489 +		goto out;
22490 +	}
22491 +	if (copy_from_user(&mc, param, sizeof(mc))) {
22492 +		retval = -EFAULT;
22493 +		goto out;
22494 +	}
22495 +
22496 +	/* Task search and manipulation must be protected */
22497 +	read_lock_irq(&tasklist_lock);
22498 +	if (!(target = find_task_by_vpid(pid))) {
22499 +		retval = -ESRCH;
22500 +		goto out_unlock;
22501 +	}
22502 +
22503 +	if (is_realtime(target)) {
22504 +		/* The task is already a real-time task.
22505 +		 * We cannot allow parameter changes at this point.
22506 +		 */
22507 +		retval = -EBUSY;
22508 +		goto out_unlock;
22509 +	}
22510 +
22511 +	/* check parameters passed in are valid */
22512 +	if (mc.crit < CRIT_LEVEL_A || mc.crit >= NUM_CRIT_LEVELS) {
22513 +		printk(KERN_WARNING "litmus: real-time task %d rejected because "
22514 +			"of invalid criticality level\n", pid);
22515 +		goto out_unlock;
22516 +	}
22517 +	if (CRIT_LEVEL_A == mc.crit &&
22518 +			(mc.lvl_a_id < 0 ||
22519 +			 mc.lvl_a_id >= CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS)) {
22520 +		printk(KERN_WARNING "litmus: real-time task %d rejected because "
22521 +			"of invalid level A id\n", pid);
22522 +		goto out_unlock;
22523 +	}
22524 +
22525 +	mc_data = tsk_rt(target)->mc_data;
22526 +	if (!mc_data) {
22527 +		mc_data = kmem_cache_alloc(mc_data_cache, GFP_ATOMIC);
22528 +		if (!mc_data) {
22529 +			retval = -ENOMEM;
22530 +			goto out_unlock;
22531 +		}
22532 +		tsk_rt(target)->mc_data = mc_data;
22533 +	}
22534 +	mc_data->mc_task = mc;
22535 +
22536 +	retval = 0;
22537 +out_unlock:
22538 +	read_unlock_irq(&tasklist_lock);
22539 +out:
22540 +	return retval;
22541 +}
22542 +#else
22543 +asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param)
22544 +{
22545 +	/* don't allow this syscall if the plugin is not enabled */
22546 +	return -EINVAL;
22547 +}
22548 +#endif
22549 +
22550 +/*
22551 + * Change virtual timer speed
22552 + *   returns EINVAL if param is invalid
22553 + */
22554 +asmlinkage long sys_change_speed(int numerator, int denominator)
22555 +{
22556 +	if ((numerator > denominator) || (denominator == 0) || (numerator == 0))
22557 +		return -EINVAL;
22558 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
22559 +	mc_change_speed(numerator, denominator);
22560 +#endif    
22561 +	return 0;
22562 +}
22563 +
22564 +/* register pid of monitoring program */
22565 +asmlinkage long sys_register_pid(pid_t pid)
22566 +{
22567 +	if (pid < 0)
22568 +		return -EINVAL;
22569 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
22570 +    mc_set_monitor_pid(pid);
22571 +#endif    
22572 +	return 0;
22573 +}
22574 +
22575 +
22576  /* p is a real-time task. Re-init its state as a best-effort task. */
22577  static void reinit_litmus_state(struct task_struct* p, int restore)
22578  {
22579 @@ -292,9 +412,6 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
22580  	 */
22581  	WARN_ON(p->rt_param.inh_task);
22582  
22583 -	/* We need to restore the priority of the task. */
22584 -//	__setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); XXX why is this commented?
22585 -
22586  	/* Cleanup everything else. */
22587  	memset(&p->rt_param, 0, sizeof(p->rt_param));
22588  
22589 @@ -335,7 +452,11 @@ long litmus_admit_task(struct task_struct* tsk)
22590  
22591  	/* allocate heap node for this task */
22592  	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
22593 -	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
22594 +#ifdef CONFIG_PLUGIN_MC
22595 +	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC, tsk_mc_crit(tsk));
22596 +#else
22597 +    tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
22598 +#endif
22599  
22600  	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
22601  		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
22602 @@ -437,10 +558,12 @@ out:
22603   */
22604  void litmus_fork(struct task_struct* p)
22605  {
22606 -	if (is_realtime(p))
22607 +	if (is_realtime(p)) {
22608  		/* clean out any litmus related state, don't preserve anything */
22609  		reinit_litmus_state(p, 0);
22610 -	else
22611 +		/* Don't let the child be a real-time task.  */
22612 +		p->sched_reset_on_fork = 1;
22613 +	} else
22614  		/* non-rt tasks might have ctrl_page set */
22615  		tsk_rt(p)->ctrl_page = NULL;
22616  
22617 @@ -480,6 +603,15 @@ void exit_litmus(struct task_struct *dead_tsk)
22618  		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
22619  	}
22620  
22621 +#ifdef CONFIG_PLUGIN_MC
22622 +        /* The MC-setup syscall might succeed and allocate mc_data, but the
22623 +	 * task may not exit as a real-time task, in which case that memory
22624 +	 * would leak. Check and free it here.
22625 +	 */
22626 +	if (tsk_rt(dead_tsk)->mc_data)
22627 +		kmem_cache_free(mc_data_cache, tsk_rt(dead_tsk)->mc_data);
22628 +#endif
22629 +
22630  	/* main cleanup only for RT tasks */
22631  	if (is_realtime(dead_tsk))
22632  		litmus_exit_task(dead_tsk);
22633 @@ -520,8 +652,14 @@ static int __init _init_litmus(void)
22634  
22635  	register_sched_plugin(&linux_sched_plugin);
22636  
22637 -	bheap_node_cache    = KMEM_CACHE(bheap_node, SLAB_PANIC);
22638 +	bheap_node_cache   = KMEM_CACHE(bheap_node, SLAB_PANIC);
22639  	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
22640 +#ifdef CONFIG_MERGE_TIMERS
22641 +	event_list_cache   = KMEM_CACHE(event_list, SLAB_PANIC);
22642 +#endif
22643 +#ifdef CONFIG_PLUGIN_MC
22644 +	mc_data_cache      = KMEM_CACHE(mc_data, SLAB_PANIC);
22645 +#endif
22646  
22647  #ifdef CONFIG_MAGIC_SYSRQ
22648  	/* offer some debugging help */
22649 @@ -541,6 +679,12 @@ static void _exit_litmus(void)
22650  	exit_litmus_proc();
22651  	kmem_cache_destroy(bheap_node_cache);
22652  	kmem_cache_destroy(release_heap_cache);
22653 +#ifdef CONFIG_MERGE_TIMERS
22654 +	kmem_cache_destroy(event_list_cache);
22655 +#endif
22656 +#ifdef CONFIG_PLUGIN_MC
22657 +	kmem_cache_destroy(mc_data_cache);
22658 +#endif
22659  }
22660  
22661  module_init(_init_litmus);
22662 diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
22663 index 81ea5c3..4bf725a 100644
22664 --- a/litmus/litmus_proc.c
22665 +++ b/litmus/litmus_proc.c
22666 @@ -8,6 +8,8 @@
22667  #include <litmus/litmus.h>
22668  #include <litmus/litmus_proc.h>
22669  
22670 +#include <litmus/clustered.h>
22671 +
22672  /* in litmus/litmus.c */
22673  extern atomic_t rt_task_count;
22674  
22675 @@ -69,18 +71,9 @@ static int proc_write_curr(struct file *file,
22676  	char name[65];
22677  	struct sched_plugin* found;
22678  
22679 -	if(count > 64)
22680 -		len = 64;
22681 -	else
22682 -		len = count;
22683 -
22684 -	if(copy_from_user(name, buffer, len))
22685 -		return -EFAULT;
22686 -
22687 -	name[len] = '\0';
22688 -	/* chomp name */
22689 -	if (len > 1 && name[len - 1] == '\n')
22690 -		name[len - 1] = '\0';
22691 +	len = copy_and_chomp(name, sizeof(name), buffer, count);
22692 +	if (len < 0)
22693 +		return len;
22694  
22695  	found = find_sched_plugin(name);
22696  
22697 @@ -113,36 +106,28 @@ static int proc_write_release_master(struct file *file,
22698  				     unsigned long count,
22699  				     void *data)
22700  {
22701 -	int cpu, err, online = 0;
22702 +	int cpu, err, len, online = 0;
22703  	char msg[64];
22704  
22705 -	if (count > 63)
22706 -		return -EINVAL;
22707 -
22708 -	if (copy_from_user(msg, buffer, count))
22709 -		return -EFAULT;
22710 +	len = copy_and_chomp(msg, sizeof(msg), buffer, count);
22711  
22712 -	/* terminate */
22713 -	msg[count] = '\0';
22714 -	/* chomp */
22715 -	if (count > 1 && msg[count - 1] == '\n')
22716 -		msg[count - 1] = '\0';
22717 +	if (len < 0)
22718 +		return len;
22719  
22720 -	if (strcmp(msg, "NO_CPU") == 0) {
22721 +	if (strcmp(msg, "NO_CPU") == 0)
22722  		atomic_set(&release_master_cpu, NO_CPU);
22723 -		return count;
22724 -	} else {
22725 +	else {
22726  		err = sscanf(msg, "%d", &cpu);
22727  		if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) {
22728  			atomic_set(&release_master_cpu, cpu);
22729 -			return count;
22730  		} else {
22731  			TRACE("invalid release master: '%s' "
22732  			      "(err:%d cpu:%d online:%d)\n",
22733  			      msg, err, cpu, online);
22734 -			return -EINVAL;
22735 +			len = -EINVAL;
22736  		}
22737  	}
22738 +	return len;
22739  }
22740  #endif
22741  
22742 @@ -257,3 +242,106 @@ void remove_plugin_proc_dir(struct sched_plugin* plugin)
22743  	}
22744  	remove_proc_entry(plugin->plugin_name, plugs_dir);
22745  }
22746 +
22747 +
22748 +
22749 +/* misc. I/O helper functions */
22750 +
22751 +int copy_and_chomp(char *kbuf, unsigned long ksize,
22752 +		   __user const char* ubuf, unsigned long ulength)
22753 +{
22754 +	/* caller must provide buffer space */
22755 +	BUG_ON(!ksize);
22756 +
22757 +	ksize--; /* leave space for null byte */
22758 +
22759 +	if (ksize > ulength)
22760 +		ksize = ulength;
22761 +
22762 +	if(copy_from_user(kbuf, ubuf, ksize))
22763 +		return -EFAULT;
22764 +
22765 +	kbuf[ksize] = '\0';
22766 +
22767 +	/* chomp kbuf */
22768 +	if (ksize > 0 && kbuf[ksize - 1] == '\n')
22769 +		kbuf[ksize - 1] = '\0';
22770 +
22771 +	return ksize;
22772 +}
22773 +
22774 +/* helper functions for clustered plugins */
22775 +static const char* cache_level_names[] = {
22776 +	"ALL",
22777 +	"L1",
22778 +	"L2",
22779 +	"L3",
22780 +};
22781 +
22782 +int parse_cache_level(const char *cache_name, enum cache_level *level)
22783 +{
22784 +	int err = -EINVAL;
22785 +	int i;
22786 +	/* do a quick and dirty comparison to find the cluster size */
22787 +	for (i = GLOBAL_CLUSTER; i <= L3_CLUSTER; i++)
22788 +		if (!strcmp(cache_name, cache_level_names[i])) {
22789 +			*level = (enum cache_level) i;
22790 +			err = 0;
22791 +			break;
22792 +		}
22793 +	return err;
22794 +}
22795 +
22796 +const char* cache_level_name(enum cache_level level)
22797 +{
22798 +	int idx = level;
22799 +
22800 +	if (idx >= GLOBAL_CLUSTER && idx <= L3_CLUSTER)
22801 +		return cache_level_names[idx];
22802 +	else
22803 +		return "INVALID";
22804 +}
22805 +
22806 +
22807 +/* proc file interface to configure the cluster size */
22808 +static int proc_read_cluster_size(char *page, char **start,
22809 +				  off_t off, int count,
22810 +				  int *eof, void *data)
22811 +{
22812 +	return snprintf(page, PAGE_SIZE, "%s\n",
22813 +			cache_level_name(*((enum cache_level*) data)));;
22814 +}
22815 +
22816 +static int proc_write_cluster_size(struct file *file,
22817 +				   const char *buffer,
22818 +				   unsigned long count,
22819 +				   void *data)
22820 +{
22821 +	int len;
22822 +	char cache_name[8];
22823 +
22824 +	len = copy_and_chomp(cache_name, sizeof(cache_name), buffer, count);
22825 +
22826 +	if (len > 0 && parse_cache_level(cache_name, (enum cache_level*) data))
22827 +		printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name);
22828 +
22829 +	return len;
22830 +}
22831 +
22832 +struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
22833 +					   enum cache_level* level)
22834 +{
22835 +	struct proc_dir_entry* cluster_file;
22836 +
22837 +	cluster_file = create_proc_entry("cluster", 0644, parent);
22838 +	if (!cluster_file) {
22839 +		printk(KERN_ERR "Could not allocate %s/cluster "
22840 +		       "procfs entry.\n", parent->name);
22841 +	} else {
22842 +		cluster_file->read_proc = proc_read_cluster_size;
22843 +		cluster_file->write_proc = proc_write_cluster_size;
22844 +		cluster_file->data = level;
22845 +	}
22846 +	return cluster_file;
22847 +}
22848 +
22849 diff --git a/litmus/locking.c b/litmus/locking.c
22850 new file mode 100644
22851 index 0000000..bc41508
22852 --- /dev/null
22853 +++ b/litmus/locking.c
22854 @@ -0,0 +1,151 @@
22855 +#include <linux/sched.h>
22856 +#include <litmus/litmus.h>
22857 +#include <litmus/fdso.h>
22858 +
22859 +#ifdef CONFIG_LITMUS_LOCKING
22860 +
22861 +#include <litmus/sched_plugin.h>
22862 +#include <litmus/trace.h>
22863 +
22864 +static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg);
22865 +static int open_generic_lock(struct od_table_entry* entry, void* __user arg);
22866 +static int close_generic_lock(struct od_table_entry* entry);
22867 +static void destroy_generic_lock(obj_type_t type, void* sem);
22868 +
22869 +struct fdso_ops generic_lock_ops = {
22870 +	.create  = create_generic_lock,
22871 +	.open    = open_generic_lock,
22872 +	.close   = close_generic_lock,
22873 +	.destroy = destroy_generic_lock
22874 +};
22875 +
22876 +static inline bool is_lock(struct od_table_entry* entry)
22877 +{
22878 +	return entry->class == &generic_lock_ops;
22879 +}
22880 +
22881 +static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
22882 +{
22883 +	BUG_ON(!is_lock(entry));
22884 +	return (struct litmus_lock*) entry->obj->obj;
22885 +}
22886 +
22887 +static  int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
22888 +{
22889 +	struct litmus_lock* lock;
22890 +	int err;
22891 +
22892 +	err = litmus->allocate_lock(&lock, type, arg);
22893 +	if (err == 0)
22894 +		*obj_ref = lock;
22895 +	return err;
22896 +}
22897 +
22898 +static int open_generic_lock(struct od_table_entry* entry, void* __user arg)
22899 +{
22900 +	struct litmus_lock* lock = get_lock(entry);
22901 +	if (lock->ops->open)
22902 +		return lock->ops->open(lock, arg);
22903 +	else
22904 +		return 0; /* default: any task can open it */
22905 +}
22906 +
22907 +static int close_generic_lock(struct od_table_entry* entry)
22908 +{
22909 +	struct litmus_lock* lock = get_lock(entry);
22910 +	if (lock->ops->close)
22911 +		return lock->ops->close(lock);
22912 +	else
22913 +		return 0; /* default: closing succeeds */
22914 +}
22915 +
22916 +static void destroy_generic_lock(obj_type_t type, void* obj)
22917 +{
22918 +	struct litmus_lock* lock = (struct litmus_lock*) obj;
22919 +	lock->ops->deallocate(lock);
22920 +}
22921 +
22922 +asmlinkage long sys_litmus_lock(int lock_od)
22923 +{
22924 +	long err = -EINVAL;
22925 +	struct od_table_entry* entry;
22926 +	struct litmus_lock* l;
22927 +
22928 +	TS_SYSCALL_IN_START;
22929 +
22930 +	TS_SYSCALL_IN_END;
22931 +
22932 +	TS_LOCK_START;
22933 +
22934 +	entry = get_entry_for_od(lock_od);
22935 +	if (entry && is_lock(entry)) {
22936 +		l = get_lock(entry);
22937 +		TRACE_CUR("attempts to lock 0x%p\n", l);
22938 +		err = l->ops->lock(l);
22939 +	}
22940 +
22941 +	/* Note: task may have been suspended or preempted in between!  Take
22942 +	 * this into account when computing overheads. */
22943 +	TS_LOCK_END;
22944 +
22945 +	TS_SYSCALL_OUT_START;
22946 +
22947 +	return err;
22948 +}
22949 +
22950 +asmlinkage long sys_litmus_unlock(int lock_od)
22951 +{
22952 +	long err = -EINVAL;
22953 +	struct od_table_entry* entry;
22954 +	struct litmus_lock* l;
22955 +
22956 +	TS_SYSCALL_IN_START;
22957 +
22958 +	TS_SYSCALL_IN_END;
22959 +
22960 +	TS_UNLOCK_START;
22961 +
22962 +	entry = get_entry_for_od(lock_od);
22963 +	if (entry && is_lock(entry)) {
22964 +		l = get_lock(entry);
22965 +		TRACE_CUR("attempts to unlock 0x%p\n", l);
22966 +		err = l->ops->unlock(l);
22967 +	}
22968 +
22969 +	/* Note: task may have been preempted in between!  Take this into
22970 +	 * account when computing overheads. */
22971 +	TS_UNLOCK_END;
22972 +
22973 +	TS_SYSCALL_OUT_START;
22974 +
22975 +	return err;
22976 +}
22977 +
22978 +struct task_struct* waitqueue_first(wait_queue_head_t *wq)
22979 +{
22980 +	wait_queue_t *q;
22981 +
22982 +	if (waitqueue_active(wq)) {
22983 +		q = list_entry(wq->task_list.next,
22984 +			       wait_queue_t, task_list);
22985 +		return (struct task_struct*) q->private;
22986 +	} else
22987 +		return NULL;
22988 +}
22989 +
22990 +
22991 +#else
22992 +
22993 +struct fdso_ops generic_lock_ops = {};
22994 +
22995 +asmlinkage long sys_litmus_lock(int sem_od)
22996 +{
22997 +	return -ENOSYS;
22998 +}
22999 +
23000 +asmlinkage long sys_litmus_unlock(int sem_od)
23001 +{
23002 +	return -ENOSYS;
23003 +}
23004 +
23005 +#endif
23006 diff --git a/litmus/preempt.c b/litmus/preempt.c
23007 index ebe2e34..528d713 100644
23008 --- a/litmus/preempt.c
23009 +++ b/litmus/preempt.c
23010 @@ -6,6 +6,8 @@
23011  /* The rescheduling state of each processor.
23012   */
23013  DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
23014 +#define TRACE_TASK(t, fmt, args...)
23015 +#define TRACE(fmt, args...)
23016  
23017  void sched_state_will_schedule(struct task_struct* tsk)
23018  {
23019 @@ -26,10 +28,10 @@ void sched_state_will_schedule(struct task_struct* tsk)
23020  			set_sched_state(PICKED_WRONG_TASK);
23021  		else
23022  			set_sched_state(WILL_SCHEDULE);
23023 -	} else
23024 -		/* Litmus tasks should never be subject to a remote
23025 -		 * set_tsk_need_resched(). */
23026 -		BUG_ON(is_realtime(tsk));
23027 +	} /* else */
23028 +	  /* 	/\* Litmus tasks should never be subject to a remote */
23029 +	  /* 	 * set_tsk_need_resched(). *\/ */
23030 +	  /* 	BUG_ON(is_realtime(tsk)); */
23031  	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
23032  		   __builtin_return_address(0));
23033  }
23034 diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
23035 index 81a5ac1..31fbb93 100644
23036 --- a/litmus/rt_domain.c
23037 +++ b/litmus/rt_domain.c
23038 @@ -10,15 +10,14 @@
23039  #include <linux/sched.h>
23040  #include <linux/list.h>
23041  #include <linux/slab.h>
23042 +#include <linux/smp.h>
23043  
23044  #include <litmus/litmus.h>
23045 +#include <litmus/event_group.h>
23046  #include <litmus/sched_plugin.h>
23047  #include <litmus/sched_trace.h>
23048 -
23049  #include <litmus/rt_domain.h>
23050 -
23051  #include <litmus/trace.h>
23052 -
23053  #include <litmus/bheap.h>
23054  
23055  /* Uncomment when debugging timer races... */
23056 @@ -51,54 +50,99 @@ static unsigned int time2slot(lt_t time)
23057  	return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
23058  }
23059  
23060 -static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
23061 +static void do_release(struct release_heap *rh)
23062  {
23063  	unsigned long flags;
23064 -	struct release_heap* rh;
23065 -
23066 -	VTRACE("on_release_timer(0x%p) starts.\n", timer);
23067  
23068 -	TS_RELEASE_START;
23069 -
23070 -	rh = container_of(timer, struct release_heap, timer);
23071 +	if (CRIT_LEVEL_B == rh->dom->level)
23072 +		TS_LVLB_RELEASE_START;
23073 +	else
23074 +		TS_LVLC_RELEASE_START;
23075  
23076  	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
23077  	VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
23078  	/* remove from release queue */
23079 -	list_del(&rh->list);
23080 +	list_del_init(&rh->list);
23081  	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
23082  	VTRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);
23083  
23084  	/* call release callback */
23085  	rh->dom->release_jobs(rh->dom, &rh->heap);
23086 -	/* WARNING: rh can be referenced from other CPUs from now on. */
23087  
23088 -	TS_RELEASE_END;
23089 +	if (CRIT_LEVEL_B == rh->dom->level)
23090 +		TS_LVLB_RELEASE_END;
23091 +	else
23092 +		TS_LVLC_RELEASE_END;
23093 +}
23094  
23095 -	VTRACE("on_release_timer(0x%p) ends.\n", timer);
23096 +#ifdef CONFIG_MERGE_TIMERS
23097 +static void on_release(struct rt_event *e)
23098 +{
23099 +	do_release(container_of(e, struct release_heap, event));
23100 +}
23101 +#else
23102 +static enum hrtimer_restart on_release(struct hrtimer *timer)
23103 +{
23104 +	do_release(container_of(timer, struct release_heap, timer));
23105 +	return HRTIMER_NORESTART;
23106 +}
23107 +#endif
23108  
23109 -	return  HRTIMER_NORESTART;
23110 +static enum hrtimer_restart on_vtimer_release(struct hrtimer *vtimer)
23111 +{
23112 +	do_release(container_of(vtimer, struct release_heap, vtimer));
23113 +	return HRTIMER_NORESTART;
23114  }
23115  
23116  /* allocated in litmus.c */
23117  struct kmem_cache * release_heap_cache;
23118  
23119 +#ifdef CONFIG_PLUGIN_MC
23120 +struct release_heap* release_heap_alloc(int gfp_flags, enum crit_level level)
23121 +#else
23122  struct release_heap* release_heap_alloc(int gfp_flags)
23123 +#endif
23124  {
23125  	struct release_heap* rh;
23126 -	rh= kmem_cache_alloc(release_heap_cache, gfp_flags);
23127 +	rh = kmem_cache_alloc(release_heap_cache, gfp_flags);
23128  	if (rh) {
23129 +#ifdef CONFIG_MERGE_TIMERS
23130 +		init_event(&rh->event, 0, on_release,
23131 +			   event_list_alloc(GFP_ATOMIC));
23132 +#else
23133  		/* initialize timer */
23134  		hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
23135 -		rh->timer.function = on_release_timer;
23136 +		rh->timer.function = on_release;
23137 +#ifdef CONFIG_PLUGIN_MC  
23138 +        if (level == CRIT_LEVEL_C) {
23139 +            hrtimer_init(&rh->vtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
23140 +            rh->vtimer.function = on_vtimer_release;
23141 +        }
23142 +        else
23143 +            rh->vtimer.function = NULL;
23144 +#endif
23145 +#endif
23146  	}
23147  	return rh;
23148  }
23149  
23150 +#ifdef CONFIG_MERGE_TIMERS
23151 +extern struct kmem_cache *event_list_cache;
23152 +#endif
23153 +
23154  void release_heap_free(struct release_heap* rh)
23155  {
23156  	/* make sure timer is no longer in use */
23157 +#ifdef CONFIG_MERGE_TIMERS
23158 +	if (rh->dom) {
23159 +		cancel_event(&rh->event);
23160 +		kmem_cache_free(event_list_cache, rh->event.event_list);
23161 +	}
23162 +#else
23163  	hrtimer_cancel(&rh->timer);
23164 +    if (rh->vtimer.function)
23165 +        hrtimer_cancel(&rh->vtimer);
23166 +#endif
23167  	kmem_cache_free(release_heap_cache, rh);
23168  }
23169  
23170 @@ -147,13 +191,17 @@ static struct release_heap* get_release_heap(rt_domain_t *rt,
23171  	return heap;
23172  }
23173  
23174 -static void reinit_release_heap(struct task_struct* t)
23175 +static void reinit_release_heap(rt_domain_t *rt, struct task_struct* t)
23176  {
23177  	struct release_heap* rh;
23178  
23179  	/* use pre-allocated release heap */
23180  	rh = tsk_rt(t)->rel_heap;
23181  
23182 +#ifdef CONFIG_MERGE_TIMERS
23183 +	rh->event.prio = rt->prio;
23184 +	cancel_event(&rh->event);
23185 +#else
23186  	/* Make sure it is safe to use.  The timer callback could still
23187  	 * be executing on another CPU; hrtimer_cancel() will wait
23188  	 * until the timer callback has completed.  However, under no
23189 @@ -164,14 +212,52 @@ static void reinit_release_heap(struct task_struct* t)
23190  	 *          deadlock may occur!
23191  	 */
23192  	BUG_ON(hrtimer_cancel(&rh->timer));
23193 -
23194 +    if (rh->vtimer.function)
23195 +        BUG_ON(hrtimer_cancel(&rh->vtimer));
23196 +#ifdef CONFIG_RELEASE_MASTER
23197 +	atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
23198 +#endif
23199 +#endif
23200  	/* initialize */
23201  	bheap_init(&rh->heap);
23202 +}
23203 +
23204  #ifdef CONFIG_RELEASE_MASTER
23205 -	atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
23206 +static void arm_release_timer_on(struct release_heap *rh, int target_cpu)
23207 +#else
23208 +static void arm_release_timer(struct release_heap *rh)
23209 +#endif
23210 +{
23211 +#ifdef CONFIG_MERGE_TIMERS
23212 +	add_event(rh->dom->event_group, &rh->event, rh->release_time);
23213 +#else
23214 +	VTRACE("arming timer 0x%p\n", &rh->timer);
23215 +	/* we cannot arm the timer using hrtimer_start()
23216 +	 * as it may deadlock on rq->lock
23217 +	 * PINNED mode is ok on both local and remote CPU
23218 +	 */
23219 +     
23220 +#ifdef CONFIG_RELEASE_MASTER
23221 +	if (rh->dom->release_master == NO_CPU && target_cpu == NO_CPU) {
23222 +#endif
23223 +		__hrtimer_start_range_ns(&rh->timer,
23224 +					 ns_to_ktime(rh->release_time),
23225 +					 0, HRTIMER_MODE_ABS_PINNED, 0);
23226 +#ifdef CONFIG_RELEASE_MASTER
23227 +	}
23228 +	else {
23229 +		hrtimer_start_on(/* target_cpu overrides release master */
23230 +				 (target_cpu != NO_CPU ?
23231 +				  target_cpu : rh->dom->release_master),
23232 +				 &rh->info, &rh->timer,
23233 +				 ns_to_ktime(rh->release_time),
23234 +				 HRTIMER_MODE_ABS_PINNED);
23235 +	}
23236 +#endif
23237  #endif
23238  }
23239 -/* arm_release_timer() - start local release timer or trigger
23240 +
23241 +/* setup_release() - start local release timer or trigger
23242   *     remote timer (pull timer)
23243   *
23244   * Called by add_release() with:
23245 @@ -179,10 +265,10 @@ static void reinit_release_heap(struct task_struct* t)
23246   * - IRQ disabled
23247   */
23248  #ifdef CONFIG_RELEASE_MASTER
23249 -#define arm_release_timer(t) arm_release_timer_on((t), NO_CPU)
23250 -static void arm_release_timer_on(rt_domain_t *_rt , int target_cpu)
23251 +#define setup_release(t) setup_release_on((t), NO_CPU)
23252 +static void setup_release_on(rt_domain_t *_rt , int target_cpu)
23253  #else
23254 -static void arm_release_timer(rt_domain_t *_rt)
23255 +static void setup_release(rt_domain_t *_rt)
23256  #endif
23257  {
23258  	rt_domain_t *rt = _rt;
23259 @@ -191,37 +277,34 @@ static void arm_release_timer(rt_domain_t *_rt)
23260  	struct task_struct* t;
23261  	struct release_heap* rh;
23262  
23263 -	VTRACE("arm_release_timer() at %llu\n", litmus_clock());
23264 +	VTRACE("setup_release() at %llu\n", litmus_clock());
23265  	list_replace_init(&rt->tobe_released, &list);
23266  
23267  	list_for_each_safe(pos, safe, &list) {
23268  		/* pick task of work list */
23269  		t = list_entry(pos, struct task_struct, rt_param.list);
23270 -		sched_trace_task_release(t);
23271 -		list_del(pos);
23272 +		list_del_init(pos);
23273  
23274  		/* put into release heap while holding release_lock */
23275  		raw_spin_lock(&rt->release_lock);
23276  		VTRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
23277 -
23278 +		
23279  		rh = get_release_heap(rt, t, 0);
23280  		if (!rh) {
23281  			/* need to use our own, but drop lock first */
23282  			raw_spin_unlock(&rt->release_lock);
23283 -			VTRACE_TASK(t, "Dropped release_lock 0x%p\n",
23284 -				    &rt->release_lock);
23285 +			VTRACE_TASK(t, "Dropped release_lock 0x%p\n", &rt->release_lock);
23286  
23287 -			reinit_release_heap(t);
23288 +			reinit_release_heap(rt, t);
23289  			VTRACE_TASK(t, "release_heap ready\n");
23290  
23291  			raw_spin_lock(&rt->release_lock);
23292 -			VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
23293 -				    &rt->release_lock);
23294 +			VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n", &rt->release_lock);
23295  
23296  			rh = get_release_heap(rt, t, 1);
23297  		}
23298  		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
23299 -		VTRACE_TASK(t, "arm_release_timer(): added to release heap\n");
23300 +		VTRACE_TASK(t, "setup_release(): added to release heap\n");
23301  
23302  		raw_spin_unlock(&rt->release_lock);
23303  		VTRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
23304 @@ -231,39 +314,19 @@ static void arm_release_timer(rt_domain_t *_rt)
23305  		 * this release_heap anyway).
23306  		 */
23307  		if (rh == tsk_rt(t)->rel_heap) {
23308 -			VTRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
23309 -			/* we cannot arm the timer using hrtimer_start()
23310 -			 * as it may deadlock on rq->lock
23311 -			 *
23312 -			 * PINNED mode is ok on both local and remote CPU
23313 -			 */
23314  #ifdef CONFIG_RELEASE_MASTER
23315 -			if (rt->release_master == NO_CPU &&
23316 -			    target_cpu == NO_CPU)
23317 -#endif
23318 -				__hrtimer_start_range_ns(&rh->timer,
23319 -						ns_to_ktime(rh->release_time),
23320 -						0, HRTIMER_MODE_ABS_PINNED, 0);
23321 -#ifdef CONFIG_RELEASE_MASTER
23322 -			else
23323 -				hrtimer_start_on(
23324 -					/* target_cpu overrides release master */
23325 -					(target_cpu != NO_CPU ?
23326 -					 target_cpu : rt->release_master),
23327 -					&rh->info, &rh->timer,
23328 -					ns_to_ktime(rh->release_time),
23329 -					HRTIMER_MODE_ABS_PINNED);
23330 +			arm_release_timer_on(rh, target_cpu);
23331 +#else
23332 +			arm_release_timer(rh);
23333  #endif
23334 -		} else
23335 -			VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
23336 +		}
23337  	}
23338  }
23339  
23340  void rt_domain_init(rt_domain_t *rt,
23341  		    bheap_prio_t order,
23342  		    check_resched_needed_t check,
23343 -		    release_jobs_t release
23344 -		   )
23345 +		    release_jobs_t release)
23346  {
23347  	int i;
23348  
23349 @@ -275,7 +338,7 @@ void rt_domain_init(rt_domain_t *rt,
23350  	if (!order)
23351  		order = dummy_order;
23352  
23353 -#ifdef CONFIG_RELEASE_MASTER
23354 +#if defined(CONFIG_RELEASE_MASTER) && !defined(CONFIG_MERGE_TIMERS)
23355  	rt->release_master = NO_CPU;
23356  #endif
23357  
23358 @@ -298,12 +361,11 @@ void rt_domain_init(rt_domain_t *rt,
23359   */
23360  void __add_ready(rt_domain_t* rt, struct task_struct *new)
23361  {
23362 -	TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
23363 +	VTRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
23364  	      new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
23365  	      get_release(new), litmus_clock());
23366 -
23367  	BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node));
23368 -
23369 +	new->rt_param.domain = rt;
23370  	bheap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
23371  	rt->check_resched(rt);
23372  }
23373 @@ -322,7 +384,7 @@ void __merge_ready(rt_domain_t* rt, struct bheap* tasks)
23374  void __add_release_on(rt_domain_t* rt, struct task_struct *task,
23375  		      int target_cpu)
23376  {
23377 -	TRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n",
23378 +	VTRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n",
23379  		   get_release(task), target_cpu);
23380  	list_add(&tsk_rt(task)->list, &rt->tobe_released);
23381  	task->rt_param.domain = rt;
23382 @@ -330,7 +392,7 @@ void __add_release_on(rt_domain_t* rt, struct task_struct *task,
23383  	/* start release timer */
23384  	TS_SCHED2_START(task);
23385  
23386 -	arm_release_timer_on(rt, target_cpu);
23387 +	setup_release_on(rt, target_cpu);
23388  
23389  	TS_SCHED2_END(task);
23390  }
23391 @@ -341,15 +403,88 @@ void __add_release_on(rt_domain_t* rt, struct task_struct *task,
23392   */
23393  void __add_release(rt_domain_t* rt, struct task_struct *task)
23394  {
23395 -	TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
23396 +	VTRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
23397  	list_add(&tsk_rt(task)->list, &rt->tobe_released);
23398  	task->rt_param.domain = rt;
23399  
23400  	/* start release timer */
23401  	TS_SCHED2_START(task);
23402  
23403 -	arm_release_timer(rt);
23404 +	setup_release(rt);
23405  
23406  	TS_SCHED2_END(task);
23407  }
23408  
23409 +/******************************************************************************
23410 + * domain_t wrapper
23411 + ******************************************************************************/
23412 +
23413 +/* pd_requeue - calls underlying rt_domain add methods.
23414 + * If the task is not yet released, it is inserted into the rt_domain
23415 + * ready queue. Otherwise, it is queued for release.
23416 + *
23417 + * Assumes the caller already holds dom->lock.
23418 + */
23419 +static void pd_requeue(domain_t *dom, struct task_struct *task)
23420 +{
23421 +	rt_domain_t *domain = (rt_domain_t*)dom->data;
23422 +
23423 +	TRACE_TASK(task, "Requeueing\n");
23424 +	BUG_ON(!task || !is_realtime(task));
23425 +	BUG_ON(is_queued(task));
23426 +	BUG_ON(get_task_domain(task) != dom);
23427 +
23428 +	if (is_released(task, litmus_clock())) {
23429 +		__add_ready(domain, task);
23430 +        VTRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
23431 +		      task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
23432 +		      get_release(task), litmus_clock());
23433 +    } else {
23434 +		/* task has to wait for next release */
23435 +		VTRACE_TASK(task, "add release(), rel=%llu vpp=%llu\n", get_release(task), task->rt_param.job_params.virt_priority_point);
23436 +		add_release(domain, task);
23437 +	}
23438 +
23439 +}
23440 +
23441 +/* pd_take_ready - removes and returns the next ready task from the rt_domain
23442 + *
23443 + * Assumes the caller already holds dom->lock.
23444 + */
23445 +static struct task_struct* pd_take_ready(domain_t *dom)
23446 +{
23447 +	return __take_ready((rt_domain_t*)dom->data);
23448 + }
23449 +
23450 +/* pd_peek_ready - returns the head of the rt_domain ready queue
23451 + *
23452 + * Assumes the caller already holds dom->lock.
23453 + */
23454 +static struct task_struct* pd_peek_ready(domain_t *dom)
23455 +{
23456 +	return  __next_ready((rt_domain_t*)dom->data);
23457 +}
23458 +
23459 +static void pd_remove(domain_t *dom, struct task_struct *task)
23460 +{
23461 +	if (is_queued(task))
23462 +		remove((rt_domain_t*)dom->data, task);
23463 +}
23464 +
23465 +/* pd_domain_init - create a generic domain wrapper for an rt_domain
23466 + */
23467 +void pd_domain_init(domain_t *dom,
23468 +		    rt_domain_t *domain,
23469 +		    bheap_prio_t order,
23470 +		    check_resched_needed_t check,
23471 +		    release_jobs_t release,
23472 +		    preempt_needed_t preempt_needed,
23473 +		    task_prio_t priority)
23474 +{
23475 +	rt_domain_init(domain, order, check, release);
23476 +	domain_init(dom, &domain->ready_lock,
23477 +		    pd_requeue, pd_peek_ready, pd_take_ready,
23478 +		    preempt_needed, priority);
23479 +	dom->remove = pd_remove;
23480 +	dom->data = domain;
23481 +}
23482 diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
23483 index 098a449..73fe1c4 100644
23484 --- a/litmus/sched_cedf.c
23485 +++ b/litmus/sched_cedf.c
23486 @@ -39,6 +39,8 @@
23487  #include <litmus/edf_common.h>
23488  #include <litmus/sched_trace.h>
23489  
23490 +#include <litmus/clustered.h>
23491 +
23492  #include <litmus/bheap.h>
23493  
23494  /* to configure the cluster size */
23495 @@ -49,12 +51,7 @@
23496   * group CPUs into clusters.  GLOBAL_CLUSTER, which is the default, means that
23497   * all CPUs form a single cluster (just like GSN-EDF).
23498   */
23499 -static enum {
23500 -	GLOBAL_CLUSTER = 0,
23501 -	L1_CLUSTER     = 1,
23502 -	L2_CLUSTER     = 2,
23503 -	L3_CLUSTER     = 3
23504 -} cluster_config = GLOBAL_CLUSTER;
23505 +static enum cache_level cluster_config = GLOBAL_CLUSTER;
23506  
23507  struct clusterdomain;
23508  
23509 @@ -770,73 +767,8 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
23510  	.activate_plugin	= cedf_activate_plugin,
23511  };
23512  
23513 -
23514 -/* proc file interface to configure the cluster size */
23515 -
23516 -static int proc_read_cluster_size(char *page, char **start,
23517 -				  off_t off, int count,
23518 -				  int *eof, void *data)
23519 -{
23520 -	int len;
23521 -	switch (cluster_config) {
23522 -	case GLOBAL_CLUSTER:
23523 -		len = snprintf(page, PAGE_SIZE, "ALL\n");
23524 -		break;
23525 -	case L1_CLUSTER:
23526 -	case L2_CLUSTER:
23527 -	case L3_CLUSTER:
23528 -		len = snprintf(page, PAGE_SIZE, "L%d\n", cluster_config);
23529 -		break;
23530 -	default:
23531 -		/* This should be impossible, but let's be paranoid. */
23532 -		len = snprintf(page, PAGE_SIZE, "INVALID (%d)\n",
23533 -			       cluster_config);
23534 -		break;
23535 -	}
23536 -	return len;
23537 -}
23538 -
23539 -static int proc_write_cluster_size(struct file *file,
23540 -				   const char *buffer,
23541 -				   unsigned long count,
23542 -				   void *data)
23543 -{
23544 -	int len;
23545 -	/* L2, L3 */
23546 -	char cache_name[33];
23547 -
23548 -	if(count > 32)
23549 -		len = 32;
23550 -	else
23551 -		len = count;
23552 -
23553 -	if(copy_from_user(cache_name, buffer, len))
23554 -		return -EFAULT;
23555 -
23556 -	cache_name[len] = '\0';
23557 -	/* chomp name */
23558 -	if (len > 1 && cache_name[len - 1] == '\n')
23559 -		cache_name[len - 1] = '\0';
23560 -
23561 -	/* do a quick and dirty comparison to find the cluster size */
23562 -	if (!strcmp(cache_name, "L2"))
23563 -		cluster_config = L2_CLUSTER;
23564 -	else if (!strcmp(cache_name, "L3"))
23565 -		cluster_config = L3_CLUSTER;
23566 -	else if (!strcmp(cache_name, "L1"))
23567 -		cluster_config = L1_CLUSTER;
23568 -	else if (!strcmp(cache_name, "ALL"))
23569 -		cluster_config = GLOBAL_CLUSTER;
23570 -	else
23571 -		printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name);
23572 -
23573 -	return len;
23574 -}
23575 -
23576 -
23577  static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL;
23578  
23579 -
23580  static int __init init_cedf(void)
23581  {
23582  	int err, fs;
23583 @@ -844,18 +776,10 @@ static int __init init_cedf(void)
23584  	err = register_sched_plugin(&cedf_plugin);
23585  	if (!err) {
23586  		fs = make_plugin_proc_dir(&cedf_plugin, &cedf_dir);
23587 -		if (!fs) {
23588 -			cluster_file = create_proc_entry("cluster", 0644, cedf_dir);
23589 -			if (!cluster_file) {
23590 -				printk(KERN_ERR "Could not allocate C-EDF/cluster "
23591 -				       "procfs entry.\n");
23592 -			} else {
23593 -				cluster_file->read_proc = proc_read_cluster_size;
23594 -				cluster_file->write_proc = proc_write_cluster_size;
23595 -			}
23596 -		} else {
23597 +		if (!fs)
23598 +			cluster_file = create_cluster_file(cedf_dir, &cluster_config);
23599 +		else
23600  			printk(KERN_ERR "Could not allocate C-EDF procfs dir.\n");
23601 -		}
23602  	}
23603  	return err;
23604  }
23605 diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
23606 index e9c5e53..c5c9600 100644
23607 --- a/litmus/sched_gsn_edf.c
23608 +++ b/litmus/sched_gsn_edf.c
23609 @@ -11,6 +11,7 @@
23610  #include <linux/spinlock.h>
23611  #include <linux/percpu.h>
23612  #include <linux/sched.h>
23613 +#include <linux/slab.h>
23614  
23615  #include <litmus/litmus.h>
23616  #include <litmus/jobs.h>
23617 @@ -446,6 +447,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
23618  		if (entry->linked) {
23619  			entry->linked->rt_param.scheduled_on = entry->cpu;
23620  			next = entry->linked;
23621 +			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
23622  		}
23623  		if (entry->scheduled) {
23624  			/* not gonna be scheduled soon */
23625 @@ -594,52 +596,60 @@ static void gsnedf_task_exit(struct task_struct * t)
23626          TRACE_TASK(t, "RIP\n");
23627  }
23628  
23629 -#ifdef CONFIG_FMLP
23630  
23631 -/* Update the queue position of a task that got it's priority boosted via
23632 - * priority inheritance. */
23633 -static void update_queue_position(struct task_struct *holder)
23634 +static long gsnedf_admit_task(struct task_struct* tsk)
23635  {
23636 -	/* We don't know whether holder is in the ready queue. It should, but
23637 -	 * on a budget overrun it may already be in a release queue.  Hence,
23638 -	 * calling unlink() is not possible since it assumes that the task is
23639 -	 * not in a release queue.  However, we can safely check whether
23640 -	 * sem->holder is currently in a queue or scheduled after locking both
23641 -	 * the release and the ready queue lock. */
23642 +	return 0;
23643 +}
23644 +
23645 +#ifdef CONFIG_LITMUS_LOCKING
23646  
23647 -	/* Assumption: caller holds gsnedf_lock */
23648 +#include <litmus/fdso.h>
23649  
23650 +/* called with IRQs off */
23651 +static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
23652 +{
23653 +	int linked_on;
23654  	int check_preempt = 0;
23655  
23656 -	if (tsk_rt(holder)->linked_on != NO_CPU) {
23657 -		TRACE_TASK(holder, "%s: linked  on %d\n",
23658 -			   __FUNCTION__, tsk_rt(holder)->linked_on);
23659 +	raw_spin_lock(&gsnedf_lock);
23660 +
23661 +	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
23662 +	tsk_rt(t)->inh_task = prio_inh;
23663 +
23664 +	linked_on  = tsk_rt(t)->linked_on;
23665 +
23666 +	/* If it is scheduled, then we need to reorder the CPU heap. */
23667 +	if (linked_on != NO_CPU) {
23668 +		TRACE_TASK(t, "%s: linked  on %d\n",
23669 +			   __FUNCTION__, linked_on);
23670  		/* Holder is scheduled; need to re-order CPUs.
23671  		 * We can't use heap_decrease() here since
23672  		 * the cpu_heap is ordered in reverse direction, so
23673  		 * it is actually an increase. */
23674  		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
23675 -			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
23676 +			    gsnedf_cpus[linked_on]->hn);
23677  		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
23678 -			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
23679 +			    gsnedf_cpus[linked_on]->hn);
23680  	} else {
23681  		/* holder may be queued: first stop queue changes */
23682  		raw_spin_lock(&gsnedf.release_lock);
23683 -		if (is_queued(holder)) {
23684 -			TRACE_TASK(holder, "%s: is queued\n",
23685 +		if (is_queued(t)) {
23686 +			TRACE_TASK(t, "%s: is queued\n",
23687  				   __FUNCTION__);
23688 -			/* We need to update the position
23689 -			 * of holder in some heap. Note that this
23690 -			 * may be a release heap. */
23691 +			/* We need to update the position of holder in some
23692 +			 * heap. Note that this could be a release heap if we
23693 +			 * budget enforcement is used and this job overran. */
23694  			check_preempt =
23695  				!bheap_decrease(edf_ready_order,
23696 -					       tsk_rt(holder)->heap_node);
23697 +					       tsk_rt(t)->heap_node);
23698  		} else {
23699  			/* Nothing to do: if it is not queued and not linked
23700 -			 * then it is currently being moved by other code
23701 -			 * (e.g., a timer interrupt handler) that will use the
23702 -			 * correct priority when enqueuing the task. */
23703 -			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
23704 +			 * then it is either sleeping or currently being moved
23705 +			 * by other code (e.g., a timer interrupt handler) that
23706 +			 * will use the correct priority when enqueuing the
23707 +			 * task. */
23708 +			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
23709  				   __FUNCTION__);
23710  		}
23711  		raw_spin_unlock(&gsnedf.release_lock);
23712 @@ -659,102 +669,257 @@ static void update_queue_position(struct task_struct *holder)
23713  			check_for_preemptions();
23714  		}
23715  	}
23716 +
23717 +	raw_spin_unlock(&gsnedf_lock);
23718  }
23719  
23720 -static long gsnedf_pi_block(struct pi_semaphore *sem,
23721 -			    struct task_struct *new_waiter)
23722 +/* called with IRQs off */
23723 +static void clear_priority_inheritance(struct task_struct* t)
23724  {
23725 -	/* This callback has to handle the situation where a new waiter is
23726 -	 * added to the wait queue of the semaphore.
23727 -	 *
23728 -	 * We must check if has a higher priority than the currently
23729 -	 * highest-priority task, and then potentially reschedule.
23730 -	 */
23731 +	raw_spin_lock(&gsnedf_lock);
23732 +
23733 +	/* A job only stops inheriting a priority when it releases a
23734 +	 * resource. Thus we can make the following assumption.*/
23735 +	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
23736 +
23737 +	TRACE_TASK(t, "priority restored\n");
23738 +	tsk_rt(t)->inh_task = NULL;
23739 +
23740 +	/* Check if rescheduling is necessary. We can't use heap_decrease()
23741 +	 * since the priority was effectively lowered. */
23742 +	unlink(t);
23743 +	gsnedf_job_arrival(t);
23744 +
23745 +	raw_spin_unlock(&gsnedf_lock);
23746 +}
23747 +
23748 +
23749 +/* ******************** FMLP support ********************** */
23750 +
23751 +/* struct for semaphore with priority inheritance */
23752 +struct fmlp_semaphore {
23753 +	struct litmus_lock litmus_lock;
23754 +
23755 +	/* current resource holder */
23756 +	struct task_struct *owner;
23757  
23758 -	BUG_ON(!new_waiter);
23759 -
23760 -	if (edf_higher_prio(new_waiter, sem->hp.task)) {
23761 -		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
23762 -		/* called with IRQs disabled */
23763 -		raw_spin_lock(&gsnedf_lock);
23764 -		/* store new highest-priority task */
23765 -		sem->hp.task = new_waiter;
23766 -		if (sem->holder) {
23767 -			TRACE_TASK(sem->holder,
23768 -				   " holds %p and will inherit from %s/%d\n",
23769 -				   sem,
23770 -				   new_waiter->comm, new_waiter->pid);
23771 -			/* let holder inherit */
23772 -			sem->holder->rt_param.inh_task = new_waiter;
23773 -			update_queue_position(sem->holder);
23774 +	/* highest-priority waiter */
23775 +	struct task_struct *hp_waiter;
23776 +
23777 +	/* FIFO queue of waiting tasks */
23778 +	wait_queue_head_t wait;
23779 +};
23780 +
23781 +static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
23782 +{
23783 +	return container_of(lock, struct fmlp_semaphore, litmus_lock);
23784 +}
23785 +
23786 +/* caller is responsible for locking */
23787 +struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
23788 +				   struct task_struct* skip)
23789 +{
23790 +	struct list_head	*pos;
23791 +	struct task_struct 	*queued, *found = NULL;
23792 +
23793 +	list_for_each(pos, &sem->wait.task_list) {
23794 +		queued  = (struct task_struct*) list_entry(pos, wait_queue_t,
23795 +							   task_list)->private;
23796 +
23797 +		/* Compare task prios, find high prio task. */
23798 +		if (queued != skip && edf_higher_prio(queued, found))
23799 +			found = queued;
23800 +	}
23801 +	return found;
23802 +}
23803 +
23804 +int gsnedf_fmlp_lock(struct litmus_lock* l)
23805 +{
23806 +	struct task_struct* t = current;
23807 +	struct fmlp_semaphore *sem = fmlp_from_lock(l);
23808 +	wait_queue_t wait;
23809 +	unsigned long flags;
23810 +
23811 +	if (!is_realtime(t))
23812 +		return -EPERM;
23813 +
23814 +	spin_lock_irqsave(&sem->wait.lock, flags);
23815 +
23816 +	if (sem->owner) {
23817 +		/* resource is not free => must suspend and wait */
23818 +
23819 +		init_waitqueue_entry(&wait, t);
23820 +
23821 +		/* FIXME: interruptible would be nice some day */
23822 +		set_task_state(t, TASK_UNINTERRUPTIBLE);
23823 +
23824 +		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
23825 +
23826 +		/* check if we need to activate priority inheritance */
23827 +		if (edf_higher_prio(t, sem->hp_waiter)) {
23828 +			sem->hp_waiter = t;
23829 +			if (edf_higher_prio(t, sem->owner))
23830 +				set_priority_inheritance(sem->owner, sem->hp_waiter);
23831  		}
23832 -		raw_spin_unlock(&gsnedf_lock);
23833 +
23834 +		/* release lock before sleeping */
23835 +		spin_unlock_irqrestore(&sem->wait.lock, flags);
23836 +
23837 +		/* We depend on the FIFO order.  Thus, we don't need to recheck
23838 +		 * when we wake up; we are guaranteed to have the lock since
23839 +		 * there is only one wake up per release.
23840 +		 */
23841 +
23842 +		schedule();
23843 +
23844 +		/* Since we hold the lock, no other task will change
23845 +		 * ->owner. We can thus check it without acquiring the spin
23846 +		 * lock. */
23847 +		BUG_ON(sem->owner != t);
23848 +
23849 +		remove_wait_queue(&sem->wait, &wait);
23850 +	} else {
23851 +		/* it's ours now */
23852 +		sem->owner = t;
23853 +
23854 +		spin_unlock_irqrestore(&sem->wait.lock, flags);
23855  	}
23856  
23857  	return 0;
23858  }
23859  
23860 -static long gsnedf_inherit_priority(struct pi_semaphore *sem,
23861 -				    struct task_struct *new_owner)
23862 +int gsnedf_fmlp_unlock(struct litmus_lock* l)
23863  {
23864 -	/* We don't need to acquire the gsnedf_lock since at the time of this
23865 -	 * call new_owner isn't actually scheduled yet (it's still sleeping)
23866 -	 * and since the calling function already holds sem->wait.lock, which
23867 -	 * prevents concurrent sem->hp.task changes.
23868 -	 */
23869 +	struct task_struct *t = current, *next;
23870 +	struct fmlp_semaphore *sem = fmlp_from_lock(l);
23871 +	unsigned long flags;
23872 +	int err = 0;
23873 +
23874 +	spin_lock_irqsave(&sem->wait.lock, flags);
23875 +
23876 +	if (sem->owner != t) {
23877 +		err = -EINVAL;
23878 +		goto out;
23879 +	}
23880 +
23881 +	/* check if there are jobs waiting for this resource */
23882 +	next = waitqueue_first(&sem->wait);
23883 +	if (next) {
23884 +		/* next becomes the resource holder */
23885 +		sem->owner = next;
23886 +		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
23887 +
23888 +		/* determine new hp_waiter if necessary */
23889 +		if (next == sem->hp_waiter) {
23890 +			TRACE_TASK(next, "was highest-prio waiter\n");
23891 +			/* next has the highest priority --- it doesn't need to
23892 +			 * inherit.  However, we need to make sure that the
23893 +			 * next-highest priority in the queue is reflected in
23894 +			 * hp_waiter. */
23895 +			sem->hp_waiter = find_hp_waiter(sem, next);
23896 +			if (sem->hp_waiter)
23897 +				TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
23898 +			else
23899 +				TRACE("no further waiters\n");
23900 +		} else {
23901 +			/* Well, if next is not the highest-priority waiter,
23902 +			 * then it ought to inherit the highest-priority
23903 +			 * waiter's priority. */
23904 +			set_priority_inheritance(next, sem->hp_waiter);
23905 +		}
23906  
23907 -	if (sem->hp.task && sem->hp.task != new_owner) {
23908 -		new_owner->rt_param.inh_task = sem->hp.task;
23909 -		TRACE_TASK(new_owner, "inherited priority from %s/%d\n",
23910 -			   sem->hp.task->comm, sem->hp.task->pid);
23911 +		/* wake up next */
23912 +		wake_up_process(next);
23913  	} else
23914 -		TRACE_TASK(new_owner,
23915 -			   "cannot inherit priority, "
23916 -			   "no higher priority job waits.\n");
23917 -	return 0;
23918 +		/* becomes available */
23919 +		sem->owner = NULL;
23920 +
23921 +	/* we lose the benefit of priority inheritance (if any) */
23922 +	if (tsk_rt(t)->inh_task)
23923 +		clear_priority_inheritance(t);
23924 +
23925 +out:
23926 +	spin_unlock_irqrestore(&sem->wait.lock, flags);
23927 +
23928 +	return err;
23929  }
23930  
23931 -/* This function is called on a semaphore release, and assumes that
23932 - * the current task is also the semaphore holder.
23933 - */
23934 -static long gsnedf_return_priority(struct pi_semaphore *sem)
23935 +int gsnedf_fmlp_close(struct litmus_lock* l)
23936  {
23937 -	struct task_struct* t = current;
23938 -	int ret = 0;
23939 +	struct task_struct *t = current;
23940 +	struct fmlp_semaphore *sem = fmlp_from_lock(l);
23941 +	unsigned long flags;
23942  
23943 -        /* Find new highest-priority semaphore task
23944 -	 * if holder task is the current hp.task.
23945 -	 *
23946 -	 * Calling function holds sem->wait.lock.
23947 -	 */
23948 -	if (t == sem->hp.task)
23949 -		edf_set_hp_task(sem);
23950 +	int owner;
23951  
23952 -	TRACE_CUR("gsnedf_return_priority for lock %p\n", sem);
23953 +	spin_lock_irqsave(&sem->wait.lock, flags);
23954  
23955 -	if (t->rt_param.inh_task) {
23956 -		/* interrupts already disabled by PI code */
23957 -		raw_spin_lock(&gsnedf_lock);
23958 +	owner = sem->owner == t;
23959  
23960 -		/* Reset inh_task to NULL. */
23961 -		t->rt_param.inh_task = NULL;
23962 +	spin_unlock_irqrestore(&sem->wait.lock, flags);
23963  
23964 -		/* Check if rescheduling is necessary */
23965 -		unlink(t);
23966 -		gsnedf_job_arrival(t);
23967 -		raw_spin_unlock(&gsnedf_lock);
23968 -	}
23969 +	if (owner)
23970 +		gsnedf_fmlp_unlock(l);
23971  
23972 -	return ret;
23973 +	return 0;
23974  }
23975  
23976 -#endif
23977 +void gsnedf_fmlp_free(struct litmus_lock* lock)
23978 +{
23979 +	kfree(fmlp_from_lock(lock));
23980 +}
23981  
23982 -static long gsnedf_admit_task(struct task_struct* tsk)
23983 +static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
23984 +	.close  = gsnedf_fmlp_close,
23985 +	.lock   = gsnedf_fmlp_lock,
23986 +	.unlock = gsnedf_fmlp_unlock,
23987 +	.deallocate = gsnedf_fmlp_free,
23988 +};
23989 +
23990 +static struct litmus_lock* gsnedf_new_fmlp(void)
23991  {
23992 -	return 0;
23993 +	struct fmlp_semaphore* sem;
23994 +
23995 +	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
23996 +	if (!sem)
23997 +		return NULL;
23998 +
23999 +	sem->owner   = NULL;
24000 +	sem->hp_waiter = NULL;
24001 +	init_waitqueue_head(&sem->wait);
24002 +	sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;
24003 +
24004 +	return &sem->litmus_lock;
24005 +}
24006 +
24007 +/* **** lock constructor **** */
24008 +
24009 +
24010 +static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
24011 +				 void* __user unused)
24012 +{
24013 +	int err = -ENXIO;
24014 +
24015 +	/* GSN-EDF currently only supports the FMLP for global resources. */
24016 +	switch (type) {
24017 +
24018 +	case FMLP_SEM:
24019 +		/* Flexible Multiprocessor Locking Protocol */
24020 +		*lock = gsnedf_new_fmlp();
24021 +		if (*lock)
24022 +			err = 0;
24023 +		else
24024 +			err = -ENOMEM;
24025 +		break;
24026 +
24027 +	};
24028 +
24029 +	return err;
24030  }
24031  
24032 +#endif
24033 +
24034 +
24035  static long gsnedf_activate_plugin(void)
24036  {
24037  	int cpu;
24038 @@ -795,14 +960,11 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
24039  	.schedule		= gsnedf_schedule,
24040  	.task_wake_up		= gsnedf_task_wake_up,
24041  	.task_block		= gsnedf_task_block,
24042 -#ifdef CONFIG_FMLP
24043 -	.fmlp_active		= 1,
24044 -	.pi_block		= gsnedf_pi_block,
24045 -	.inherit_priority	= gsnedf_inherit_priority,
24046 -	.return_priority	= gsnedf_return_priority,
24047 -#endif
24048  	.admit_task		= gsnedf_admit_task,
24049  	.activate_plugin	= gsnedf_activate_plugin,
24050 +#ifdef CONFIG_LITMUS_LOCKING
24051 +	.allocate_lock		= gsnedf_allocate_lock,
24052 +#endif
24053  };
24054  
24055  
24056 diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
24057 index e695289..0687be0 100644
24058 --- a/litmus/sched_litmus.c
24059 +++ b/litmus/sched_litmus.c
24060 @@ -194,6 +194,9 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p,
24061  
24062  static void yield_task_litmus(struct rq *rq)
24063  {
24064 +	TS_SYSCALL_IN_START;
24065 +
24066 +	TS_SYSCALL_OUT_END;
24067  	BUG_ON(rq->curr != current);
24068  	/* sched_yield() is called to trigger delayed preemptions.
24069  	 * Thus, mark the current task as needing to be rescheduled.
24070 @@ -202,6 +205,8 @@ static void yield_task_litmus(struct rq *rq)
24071  	 */
24072  	clear_exit_np(current);
24073  	litmus_reschedule_local();
24074 +
24075 +	TS_SYSCALL_OUT_START;
24076  }
24077  
24078  /* Plugins are responsible for this.
24079 diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
24080 new file mode 100644
24081 index 0000000..c4e5253
24082 --- /dev/null
24083 +++ b/litmus/sched_mc.c
24084 @@ -0,0 +1,1769 @@
24085 +/**
24086 + * litmus/sched_mc.c
24087 + *
24088 + * Implementation of the Mixed Criticality scheduling algorithm.
24089 + *
24090 + * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
24091 + *
24092 + * Absolute first: relative time spent doing different parts of release
24093 + * and scheduling overhead needs to be measured and graphed.
24094 + *
24095 + * Domain locks should be more fine-grained. There is no reason to hold the
24096 + * ready-queue lock when adding a task to the release-queue.
24097 + *
24098 + * The levels should be converted to linked-lists so that they are more
24099 + * adaptable and need not be identical on all processors.
24100 + *
24101 + * The interaction between remove_from_all and other concurrent operations
24102 + * should be re-examined. If a job_completion and a preemption happen
24103 + * simultaneously, a task could be requeued, removed, then requeued again.
24104 + *
24105 + * Level-C tasks should be able to swap CPUs a-la GSN-EDF. They should also
24106 + * try and swap with the last CPU they were on. This could be complicated for
24107 + * ghost tasks.
24108 + *
24109 + * Locking for timer-merging could be infinitely more fine-grained. A second
24110 + * hash could select a lock to use based on queue slot. This approach might
24111 + * also help with add_release in rt_domains.
24112 + *
24113 + * It should be possible to reserve a CPU for ftdumping.
24114 + *
24115 + * The real_deadline business seems sloppy.
24116 + *
24117 + * The amount of data in the header file should be cut down. The use of the
24118 + * header file in general needs to be re-examined.
24119 + *
24120 + * The plugin needs to be modified so that it doesn't freeze when it is
24121 + * deactivated in a VM.
24122 + *
24123 + * The locking in check_for_preempt is not fine-grained enough.
24124 + *
24125 + * The size of the structures could be smaller. Debugging info might be
24126 + * excessive as things currently stand.
24127 + *
24128 + * The macro can_requeue has been expanded too much. Anything beyond
24129 + * scheduled_on is a hack!
24130 + *
24131 + * Domain names (rt_domain) are still clumsy.
24132 + *
24133 + * Should BE be moved into the kernel? This will require benchmarking.
24134 + */
24135 +
24136 +#include <asm/uaccess.h>
24137 +#include <linux/uaccess.h>
24138 +#include <linux/spinlock.h>
24139 +#include <linux/percpu.h>
24140 +#include <linux/sched.h>
24141 +#include <linux/hrtimer.h>
24142 +#include <linux/slab.h>
24143 +#include <linux/module.h>
24144 +#include <linux/poison.h>
24145 +#include <linux/pid.h>
24146 +#include <linux/circ_buf.h>
24147 +#include <asm/i387.h>
24148 +
24149 +#include <litmus/litmus.h>
24150 +#include <litmus/trace.h>
24151 +#include <litmus/jobs.h>
24152 +#include <litmus/sched_plugin.h>
24153 +#include <litmus/edf_common.h>
24154 +#include <litmus/sched_trace.h>
24155 +#include <litmus/domain.h>
24156 +#include <litmus/bheap.h>
24157 +#include <litmus/event_group.h>
24158 +#include <litmus/budget.h>
24159 +
24160 +#include <litmus/sched_mc.h>
24161 +#include <litmus/ce_domain.h>
24162 +
24163 +/**
24164 + * struct cpu_entry - State of a CPU for the entire MC system
24165 + * @cpu		  CPU id
24166 + * @scheduled	  Task that is physically running
24167 + * @linked	  Task that should be running / is logically running
24168 + * @lock	  For serialization
24169 + * @crit_entries  Array of CPU state per criticality level
24170 + * @redir	  List of redirected work for this CPU.
24171 + * @redir_lock	  Lock for @redir.
24172 + * @event_group	  Event group for timer merging.
24173 + */
24174 +struct cpu_entry {
24175 +	int			cpu;
24176 +	struct task_struct*	scheduled;
24177 +	struct task_struct*	will_schedule;
24178 +	struct task_struct*	linked;
24179 +	raw_spinlock_t		lock;
24180 +	struct crit_entry	crit_entries[NUM_CRIT_LEVELS];
24181 +#ifdef CONFIG_PLUGIN_MC_REDIRECT
24182 +	struct list_head	redir;
24183 +	raw_spinlock_t		redir_lock;
24184 +#endif
24185 +#ifdef CONFIG_MERGE_TIMERS
24186 +	struct event_group *event_group;
24187 +#endif
24188 +};
24189 +
24190 +DEFINE_PER_CPU(struct cpu_entry, cpus);
24191 +
24192 +#ifdef CONFIG_RELEASE_MASTER
24193 +static int interrupt_cpu;
24194 +#endif
24195 +
24196 +#define domain_data(dom)  (container_of(dom, struct domain_data, domain))
24197 +#define is_global(dom)    (domain_data(dom)->heap)
24198 +#define is_global_task(t) (is_global(get_task_domain(t)))
24199 +#define can_use(ce) \
24200 +	((ce)->state == CS_ACTIVE || (ce->state == CS_ACTIVATE))
24201 +#define can_requeue(t)							\
24202 +	((t)->rt_param.linked_on == NO_CPU && /* Not linked anywhere */ \
24203 +	 !is_queued(t) &&	              /* Not gonna be linked */ \
24204 +	 (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU))
24205 +#define entry_level(e) \
24206 +	(((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
24207 +#define crit_cpu(ce) \
24208 +	(container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries))
24209 +#define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level])
24210 +#define TRACE_ENTRY(e, fmt, args...)				\
24211 +	STRACE("P%d, linked=" TS " " fmt, e->cpu, TA(e->linked), ##args)
24212 +#define TRACE_CRIT_ENTRY(ce, fmt, args...)			\
24213 +	STRACE("%s P%d, linked=" TS " " fmt,			\
24214 +	      (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args)
24215 +
24216 +rt_domain_t _mc_crit_c_rt;
24217 +atomic_t ntask;
24218 +
24219 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
24220 +static lt_t first_task_arrival = 0;
24221 +pid_t signal_pid;
24222 +spinlock_t report_queue_lock;
24223 +static struct report_data* report_queue;
24224 +static struct report_data* release_report_queue;
24225 +static struct report_data* release_report_queue2;
24226 +atomic_t num_report_queue;
24227 +atomic_t num_release_report_queue;
24228 +atomic_t num_release_report_queue2;
24229 +unsigned long long report_seq = 0;
24230 +unsigned long long release_report_seq = 0;
24231 +atomic_t pend_release_signal;
24232 +static struct virt_timer _mc_crit_c_virt_timer;
24233 +
24234 +void send_release_signal_to_monitor(void);
24235 +
24236 +lt_t act_to_virt(rt_domain_t* rt, lt_t act)
24237 +{
24238 +    lt_t vtime;
24239 +    kernel_fpu_begin();
24240 +    {
24241 +        double speed = (double)rt->virt_timer->speed_num/(double)rt->virt_timer->speed_denom;
24242 +        vtime = (double)(act - rt->virt_timer->last_act)*speed;
24243 +    }
24244 +    kernel_fpu_end();
24245 +    vtime += rt->virt_timer->last_virt;
24246 +    return vtime;
24247 +}
24248 +    
24249 +lt_t virt_to_act(rt_domain_t* rt, lt_t virt)
24250 +{
24251 +    lt_t atime;
24252 +    kernel_fpu_begin();
24253 +    {
24254 +        double speed = (double)rt->virt_timer->speed_num/(double)rt->virt_timer->speed_denom;
24255 +        atime = (double)(virt - rt->virt_timer->last_virt)/speed;
24256 +    }
24257 +    kernel_fpu_end();
24258 +    atime += rt->virt_timer->last_act;
24259 +    return atime;
24260 +}
24261 +
24262 +asmlinkage long sys_get_job_report(int type, int *num_report, struct report_data __user * param)
24263 +{
24264 +	int retval = -EINVAL;
24265 +    if (param == 0)
24266 +            return retval;
24267 +        
24268 +    if (type == 0) { // release
24269 +        retval = put_user(atomic_read(&num_release_report_queue), num_report);
24270 +        retval = copy_to_user(param, release_report_queue, sizeof(struct report_data)*atomic_read(&num_release_report_queue));
24271 +        atomic_set(&num_release_report_queue, 0);
24272 +    }
24273 +    else if (type == 1) { // release2
24274 +        retval = put_user(atomic_read(&num_release_report_queue2), num_report);
24275 +        retval = copy_to_user(param, release_report_queue2, sizeof(struct report_data)*atomic_read(&num_release_report_queue2));
24276 +        atomic_set(&num_release_report_queue2, 0);
24277 +    }
24278 +    else if (type == 2) { // complete
24279 +        struct report_data *p_report = report_queue;
24280 +        retval = put_user(atomic_read(&num_report_queue), num_report);
24281 +        retval = copy_to_user(param, p_report, sizeof(struct report_data)*atomic_read(&num_report_queue));
24282 +        atomic_set(&num_report_queue, 0);
24283 +    }
24284 +    
24285 +	return retval;
24286 +}
24287 +#else
24288 +asmlinkage long sys_get_job_report(int type, int *num_report, struct report_data __user * param)
24289 +{
24290 +    return -EINVAL;
24291 +}
24292 +#endif
24293 +
24294 +/*
24295 + * Sort CPUs within a global domain's heap.
24296 + */
24297 +static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
24298 +{
24299 +	struct domain *domain;
24300 +	struct crit_entry *first, *second;
24301 +	struct task_struct *first_link, *second_link;
24302 +
24303 +	first  = a->value;
24304 +	second = b->value;
24305 +	first_link  = first->linked;
24306 +	second_link = second->linked;
24307 +
24308 +	if (first->state == CS_REMOVED || second->state == CS_REMOVED) {
24309 +		/* Removed entries go at the back of the heap */
24310 +		return first->state  != CS_REMOVED &&
24311 +		       second->state != CS_REMOVED;
24312 +	} else if (!first_link || !second_link) {
24313 +		/* Entry with nothing scheduled is lowest priority */
24314 +		return second_link && !first_link;
24315 +	} else {
24316 +		/* Sort by deadlines of tasks */
24317 +		domain = get_task_domain(first_link);
24318 +		return domain->higher_prio(second_link, first_link);
24319 +	}
24320 +}
24321 +
24322 +/*
24323 + * Return true if the domain has a higher priority ready task. The @curr
24324 + * task must belong to the domain.
24325 + */
24326 +static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
24327 +{
24328 +	struct task_struct *next = dom->peek_ready(dom);
24329 +	if (!next || !curr) {
24330 +		return next && !curr;
24331 +	} else {
24332 +		BUG_ON(tsk_mc_crit(next) != tsk_mc_crit(curr));
24333 +		return get_task_domain(next)->higher_prio(next, curr);
24334 +	}
24335 +}
24336 +
24337 +/*
24338 + * Update crit entry position in a global heap. Caller must hold
24339 + * @ce's domain lock.
24340 + */
24341 +static inline void update_crit_position(struct crit_entry *ce)
24342 +{
24343 +	struct bheap *heap;
24344 +	if (is_global(ce->domain)) {
24345 +		heap = domain_data(ce->domain)->heap;
24346 +		BUG_ON(!heap);
24347 +		BUG_ON(!bheap_node_in_heap(ce->node));
24348 +		bheap_delete(cpu_lower_prio, heap, ce->node);
24349 +		bheap_insert(cpu_lower_prio, heap, ce->node);
24350 +	}
24351 +}
24352 +
24353 +/*
24354 + * Update crit entry position in a global heap if it has been marked
24355 + * for update. Caller must hold @ce's domain lock.
24356 + */
24357 +static void fix_crit_position(struct crit_entry *ce)
24358 +{
24359 +	if (is_global(ce->domain)) {
24360 +		if (CS_ACTIVATE == ce->state) {
24361 +			ce->state = CS_ACTIVE;
24362 +			update_crit_position(ce);
24363 +		} else if (CS_REMOVE == ce->state) {
24364 +			ce->state = CS_REMOVED;
24365 +			update_crit_position(ce);
24366 +		}
24367 +	}
24368 +}
24369 +
24370 +/*
24371 + * Return next CPU which should preempted or NULL if the domain has no
24372 + * preemptable CPUs. Caller must hold the @dom lock.
24373 + */
24374 +static inline struct crit_entry* lowest_prio_cpu(struct domain *dom)
24375 +{
24376 +	struct bheap *heap = domain_data(dom)->heap;
24377 +	struct bheap_node* hn;
24378 +	struct crit_entry *ce, *res = NULL;
24379 +	do {
24380 +		hn = bheap_peek(cpu_lower_prio, heap);
24381 +		ce = (hn) ? hn->value : NULL;
24382 +		if (ce) {
24383 +			if (ce->state == CS_ACTIVE)
24384 +				res = ce;
24385 +			else if (ce->state == CS_REMOVED)
24386 +				ce = NULL;
24387 +			else
24388 +				fix_crit_position(ce);
24389 +		}
24390 +	} while (ce && !res);
24391 +	return res;
24392 +}
24393 +
24394 +/*
24395 + * Cancel ghost timer.
24396 + */
24397 +static inline void cancel_ghost(struct crit_entry *ce)
24398 +{
24399 +#ifdef CONFIG_MERGE_TIMERS
24400 +	cancel_event(&ce->event);
24401 +#else
24402 +	hrtimer_try_to_cancel(&ce->timer);
24403 +#endif
24404 +}
24405 +
24406 +/*
24407 + * Arm ghost timer. Will merge timers if the option is specified.
24408 + */
24409 +static inline void arm_ghost(struct crit_entry *ce, lt_t fire)
24410 +{
24411 +#ifdef CONFIG_MERGE_TIMERS
24412 +	add_event(crit_cpu(ce)->event_group, &ce->event, fire);
24413 +#else
24414 +	__hrtimer_start_range_ns(&ce->timer,
24415 +				 ns_to_ktime(fire),
24416 +				 0 /* delta */,
24417 +				 HRTIMER_MODE_ABS_PINNED,
24418 +				 0 /* no wakeup */);
24419 +#endif
24420 +}
24421 +
24422 +/*
24423 + * Time accounting for ghost tasks.
24424 + * Must be called before a decision is made involving the task's budget.
24425 + */
24426 +static void update_ghost_time(struct task_struct *p)
24427 +{
24428 +	u64 clock = litmus_clock();
24429 +	u64 delta = clock - p->se.exec_start;
24430 +	BUG_ON(!is_ghost(p));
24431 +	if (unlikely ((s64)delta < 0)) {
24432 +		delta = 0;
24433 +		TRACE_MC_TASK(p, "WARNING: negative time delta\n");
24434 +	}
24435 +	if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) {
24436 +		TRACE_MC_TASK(p, "Ghost job could have ended\n");
24437 +		tsk_mc_data(p)->mc_job.ghost_budget = 0;
24438 +		p->se.exec_start = clock;
24439 +	} else {
24440 +		TRACE_MC_TASK(p, "Ghost job updated, but didn't finish\n");
24441 +		tsk_mc_data(p)->mc_job.ghost_budget -= delta;
24442 +		p->se.exec_start = clock;
24443 +	}
24444 +}
24445 +
24446 +/**
24447 + * link_task_to_crit() - Logically run a task at a criticality level.
24448 + * Caller must hold @ce's CPU lock.
24449 + */
24450 +static void link_task_to_crit(struct crit_entry *ce,
24451 +			      struct task_struct *task)
24452 +{
24453 +	lt_t when_to_fire;
24454 +
24455 +	TRACE_CRIT_ENTRY(ce, "Linking " TS "\n", TA(task));
24456 +	
24457 +	BUG_ON(!can_use(ce) && task);
24458 +	BUG_ON(task && tsk_rt(task)->linked_on != NO_CPU);
24459 +	BUG_ON(task && is_global(ce->domain) &&
24460 +	       !bheap_node_in_heap(ce->node));
24461 +
24462 +	/* Unlink last task */
24463 +	if (ce->linked) {
24464 +		TRACE_MC_TASK(ce->linked, "Unlinking\n");
24465 +		ce->linked->rt_param.linked_on = NO_CPU;
24466 +		if (is_ghost(ce->linked)) {
24467 +			cancel_ghost(ce);
24468 +			if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) {
24469 +				/* Job isn't finished, so do accounting */
24470 +				update_ghost_time(ce->linked);
24471 +			}
24472 +		}
24473 +	}
24474 +
24475 +	/* Actually link task */
24476 +	ce->linked = task;
24477 +	if (task) {
24478 +		task->rt_param.linked_on = crit_cpu(ce)->cpu;
24479 +		if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
24480 +			/* There is a level-A timer that will force a
24481 +			 * preemption, so we don't set this for level-A
24482 +			 * tasks. Otherwise reset the budget timer.
24483 +			 */
24484 +			task->se.exec_start = litmus_clock();
24485 +			when_to_fire = task->se.exec_start +
24486 +				tsk_mc_data(task)->mc_job.ghost_budget;
24487 +			arm_ghost(ce, when_to_fire);
24488 +		}
24489 +	}
24490 +}
24491 +
24492 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
24493 +inline static int check_report_queue(pid_t pid, unsigned int job_no)
24494 +{
24495 +    int i;
24496 +    int ret = 0;
24497 +    for (i=0; i<atomic_read(&num_release_report_queue); i++) {
24498 +        if (release_report_queue[i].pid == pid && release_report_queue[i].job_no == job_no)
24499 +            ret = 1;
24500 +    }
24501 +    
24502 +    return ret;
24503 +}
24504 +#endif
24505 +
24506 +static void check_for_preempt(struct domain*);
24507 +
24508 +/**
24509 + * job_arrival() - Called when a task re-enters the system.
24510 + * Caller must hold no locks.
24511 + */
24512 +static void job_arrival(struct task_struct *task)
24513 +{
24514 +	struct domain *dom = get_task_domain(task);
24515 +    rt_domain_t *rt = (rt_domain_t*)dom->data;
24516 +    
24517 +	TRACE_MC_TASK(task, "Job arriving at domain %s\n", dom->name);
24518 +    BUG_ON(!task);
24519 +
24520 +	raw_spin_lock(dom->lock);
24521 +	if (can_requeue(task)) {
24522 +		BUG_ON(task->rt_param.linked_on != NO_CPU);
24523 +        if (task->rt_param.domain == NULL)
24524 +            task->rt_param.domain = rt;
24525 +        dom->requeue(dom, task);
24526 +		check_for_preempt(dom);
24527 +	} else {
24528 +		/* If a global task is scheduled on one cpu, it CANNOT
24529 +		 * be requeued into a global domain. Another cpu might
24530 +		 * dequeue the global task before it is descheduled,
24531 +		 * causing the system to crash when the task is scheduled
24532 +		 * in two places simultaneously.
24533 +		 */
24534 +		TRACE_MC_TASK(task, "Delayed arrival of scheduled task\n");
24535 +	}
24536 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME	
24537 +    if ((rt->level == CRIT_LEVEL_C) && (task->pid != signal_pid) && (task->rt_param.job_params.job_no > START_JOB_NO) && (get_exec_time(task) == 0) && is_released(task, litmus_clock())) {
24538 +        if (check_report_queue(task->pid, task->rt_param.job_params.job_no) == 0) {
24539 +            int i = atomic_read(&num_release_report_queue2);
24540 +            release_report_queue2[i].seq = release_report_seq++;
24541 +            release_report_queue2[i].pid = task->pid;
24542 +            release_report_queue2[i].job_no = task->rt_param.job_params.job_no;
24543 +            atomic_inc(&num_release_report_queue2);
24544 +            if (atomic_read(&num_release_report_queue2) > 1000)
24545 +                atomic_set(&num_release_report_queue2, 1);
24546 +            atomic_set(&pend_release_signal, 1);
24547 +        }
24548 +    }
24549 +#endif    
24550 +    raw_spin_unlock(dom->lock);
24551 +}
24552 +
24553 +/**
24554 + * low_prio_arrival() - If CONFIG_PLUGIN_MC_REDIRECT is enabled, will
24555 + * redirect a lower priority job_arrival work to the interrupt_cpu.
24556 + */
24557 +static void low_prio_arrival(struct task_struct *task)
24558 +{
24559 +#ifdef  CONFIG_PLUGIN_MC_REDIRECT
24560 +	struct cpu_entry *entry;
24561 +#endif
24562 +	/* Race conditions! */
24563 +	if (!can_requeue(task)) return;
24564 +
24565 +#ifdef  CONFIG_PLUGIN_MC_REDIRECT
24566 +	if (!is_global_task(task))
24567 +		goto arrive;
24568 +	if (smp_processor_id() != interrupt_cpu) {
24569 +		entry = &__get_cpu_var(cpus);
24570 +		raw_spin_lock(&entry->redir_lock);
24571 +		TRACE_MC_TASK(task, "Adding to redirect queue\n");
24572 +		list_add(&tsk_rt(task)->list, &entry->redir);
24573 +		raw_spin_unlock(&entry->redir_lock);
24574 +		litmus_reschedule(interrupt_cpu);
24575 +	} else
24576 +#endif
24577 +	{
24578 +#ifdef  CONFIG_PLUGIN_MC_REDIRECT	
24579 +arrive:
24580 +#endif
24581 +		job_arrival(task);
24582 +	}
24583 +}
24584 +
24585 +#ifdef CONFIG_PLUGIN_MC_REDIRECT
24586 +/**
24587 + * fix_global_levels() - Execute redirected job arrivals on this cpu.
24588 + */
24589 +static void fix_global_levels(void)
24590 +{
24591 +	int c;
24592 +	struct cpu_entry *e;
24593 +	struct list_head *pos, *safe;
24594 +	struct task_struct *t;
24595 +
24596 +	//STRACE("Fixing global levels\n");
24597 +	for_each_online_cpu(c) {
24598 +		e = &per_cpu(cpus, c);
24599 +		raw_spin_lock(&e->redir_lock);
24600 +		list_for_each_safe(pos, safe, &e->redir) {
24601 +			t = list_entry(pos, struct task_struct, rt_param.list);
24602 +			BUG_ON(!t);
24603 +			TRACE_MC_TASK(t, "Dequeued redirected job\n");
24604 +			list_del_init(pos);
24605 +			job_arrival(t);
24606 +		}
24607 +		raw_spin_unlock(&e->redir_lock);
24608 +	}
24609 +}
24610 +#endif
24611 +
24612 +/**
24613 + * link_task_to_cpu() - Logically run a task on a CPU.
24614 + * The task must first have been linked to one of the CPU's crit_entries.
24615 + * Caller must hold the entry lock.
24616 + */
24617 +static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
24618 +{
24619 +	int i = entry_level(entry);
24620 +	struct crit_entry *ce;
24621 +	TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu);
24622 +	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
24623 +	BUG_ON(task && is_ghost(task));
24624 +
24625 +	if (task){
24626 +		set_rt_flags(task, RT_F_RUNNING);
24627 +	}
24628 +	entry->linked = task;
24629 +
24630 +	/* Higher criticality crit entries are now usable */
24631 +	for (; i < entry_level(entry) + 1; i++) {
24632 +		ce = &entry->crit_entries[i];
24633 +		if (!can_use(ce)) {
24634 +			ce->state = CS_ACTIVATE;
24635 +		}
24636 +	}
24637 +}
24638 +
24639 +/**
24640 + * preempt() - Preempt a logically running task with a higher priority one.
24641 + * @dom	Domain from which to draw higher priority task
24642 + * @ce	CPU criticality level to preempt
24643 + *
24644 + * Caller must hold the lock for @dom and @ce's CPU lock.
24645 + */
24646 +static void preempt(struct domain *dom, struct crit_entry *ce)
24647 +{
24648 +	struct task_struct *task = dom->take_ready(dom);
24649 +	struct cpu_entry *entry = crit_cpu(ce);
24650 +	struct task_struct *old = ce->linked;
24651 +
24652 +	BUG_ON(!task);
24653 +	TRACE_CRIT_ENTRY(ce, "Preempted by " TS "\n", TA(task));
24654 +
24655 +	/* Per-domain preemption */
24656 +	link_task_to_crit(ce, task);
24657 +	if (old && can_requeue(old)) {
24658 +		dom->requeue(dom, old);
24659 +	}
24660 +	update_crit_position(ce);
24661 +
24662 +	/* Preempt actual execution if this is a running task */
24663 +	if (!is_ghost(task)) {
24664 +		link_task_to_cpu(entry, task);
24665 +		preempt_if_preemptable(entry->scheduled, entry->cpu);
24666 +	} else if (old && old == entry->linked) {
24667 +		/* Preempted a running task with a ghost job. Null needs to be
24668 +		 * running.
24669 +		 */
24670 +		link_task_to_cpu(entry, NULL);
24671 +		preempt_if_preemptable(entry->scheduled, entry->cpu);
24672 +	}
24673 +}
24674 +
24675 +/**
24676 + * update_crit_levels() - Update criticality entries for the new cpu state.
24677 + * This should be called after a new task has been linked to @entry.
24678 + * The caller must hold the @entry->lock, but this method will release it.
24679 + */
24680 +static void update_crit_levels(struct cpu_entry *entry)
24681 +{
24682 +	int i, global_preempted;
24683 +	struct crit_entry *ce;
24684 +	struct task_struct *readmit[NUM_CRIT_LEVELS];
24685 +	enum crit_level level = entry_level(entry);
24686 +
24687 +	/* Remove lower priority tasks from the entry */
24688 +	for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
24689 +		ce = &entry->crit_entries[i];
24690 +
24691 +		global_preempted = ce->linked &&
24692 +			/* This task is running on a cpu */
24693 +			ce->linked->rt_param.scheduled_on == entry->cpu &&
24694 +			/* But it was preempted */
24695 +			ce->linked != entry->linked &&
24696 +			/* And it is an eligible global task */
24697 +			!is_ghost(ce->linked) && is_global(ce->domain);
24698 +
24699 +		/* Do not readmit global tasks which are preempted! These can't
24700 +		 * ever be re-admitted until they are descheduled for reasons
24701 +		 * explained in job_arrival.
24702 +		 */
24703 +		readmit[i] = (!global_preempted) ? ce->linked : NULL;
24704 +
24705 +		ce->state = CS_REMOVE;
24706 +		if (ce->linked)
24707 +			link_task_to_crit(ce, NULL);
24708 +	}
24709 +	/* Need to unlock so we can access domains */
24710 +	raw_spin_unlock(&entry->lock);
24711 +
24712 +	/* Re-admit tasks to the system */
24713 +	for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
24714 +		ce = &entry->crit_entries[i];
24715 +		if (readmit[i]) {
24716 +			low_prio_arrival(readmit[i]);
24717 +		}
24718 +	}
24719 +}
24720 +
24721 +/**
24722 + * check_for_preempt() - Causes a preemption if higher-priority tasks are ready.
24723 + * Caller must hold domain lock.
24724 + * Makes gigantic nasty assumption that there is 1 global criticality level,
24725 + * and it is the last one in each list, so it doesn't call update_crit_levels().
24726 + */
24727 +static void check_for_preempt(struct domain *dom)
24728 +{
24729 +	int recheck = 1;
24730 +	struct cpu_entry *entry;
24731 +	struct crit_entry *ce;
24732 +
24733 +	if (is_global(dom)) {
24734 +		/* Loop until we find a non-preemptable CPU */
24735 +		while ((ce = lowest_prio_cpu(dom)) && recheck) {
24736 +			entry = crit_cpu(ce);
24737 +			recheck = 1;
24738 +
24739 +			/* Cache next task */
24740 +			dom->peek_ready(dom);
24741 +
24742 +			raw_spin_lock(&entry->lock);
24743 +			if (!can_use(ce))
24744 +				/* CPU disabled while locking! */
24745 +				fix_crit_position(ce);
24746 +			else if (dom->preempt_needed(dom, ce->linked))
24747 +				/* Success! Check for more preemptions */
24748 +				preempt(dom, ce);
24749 +			else {
24750 +				/* Failure! */
24751 +				recheck = 0;
24752 +				TRACE_CRIT_ENTRY(ce, "Stopped global check\n");
24753 +			}
24754 +			raw_spin_unlock(&entry->lock);
24755 +		}
24756 +	} else /* Partitioned */ {
24757 +		ce = domain_data(dom)->crit_entry;
24758 +		entry = crit_cpu(ce);
24759 +
24760 +		/* Cache next task */
24761 +		dom->peek_ready(dom);
24762 +
24763 +		raw_spin_lock(&entry->lock);
24764 +		if (can_use(ce) && dom->preempt_needed(dom, ce->linked)) {
24765 +			preempt(dom, ce);
24766 +			update_crit_levels(entry);
24767 +		} else {
24768 +			raw_spin_unlock(&entry->lock);
24769 +		}
24770 +	}
24771 +}
24772 +
24773 +/**
24774 + * remove_from_all() - Logically remove a task from all structures.
24775 + * Caller must hold no locks.
24776 + */
24777 +static void remove_from_all(struct task_struct* task)
24778 +{
24779 +	int update = 0;
24780 +   	struct cpu_entry *entry;
24781 +	struct crit_entry *ce;
24782 +	struct domain *dom = get_task_domain(task);
24783 +
24784 +	TRACE_MC_TASK(task, "Removing from everything\n");
24785 +	BUG_ON(!task);
24786 +
24787 +	raw_spin_lock(dom->lock);
24788 +
24789 +	/* Remove the task from any CPU state */
24790 +	if (task->rt_param.linked_on != NO_CPU) {
24791 +		entry = &per_cpu(cpus, task->rt_param.linked_on);
24792 +		raw_spin_lock(&entry->lock);
24793 +
24794 +		/* Unlink only if task is still linked post lock */
24795 +		ce = &entry->crit_entries[tsk_mc_crit(task)];
24796 +		if (task->rt_param.linked_on != NO_CPU) {
24797 +			BUG_ON(ce->linked != task);
24798 +			link_task_to_crit(ce, NULL);
24799 +			update_crit_position(ce);
24800 +			if (!is_ghost(task) && entry->linked == task) {
24801 +				update = 1;
24802 +				link_task_to_cpu(entry, NULL);
24803 +			}
24804 +		} else {
24805 +			TRACE_MC_TASK(task, "Unlinked before we got lock!\n");
24806 +		}
24807 +		if (update)
24808 +			update_crit_levels(entry);
24809 +		else
24810 +			raw_spin_unlock(&entry->lock);
24811 +	} else {
24812 +		TRACE_MC_TASK(task, "Not linked to anything\n");
24813 +	}
24814 +
24815 +	/* Ensure the task isn't returned by its domain */
24816 +	dom->remove(dom, task);
24817 +
24818 +	raw_spin_unlock(dom->lock);
24819 +}
24820 +
24821 +/**
24822 + * job_completion() - Update task state and re-enter it into the system.
24823 + * Converts tasks which have completed their execution early into ghost jobs.
24824 + * Caller must hold no locks.
24825 + */
24826 +static void job_completion(struct task_struct *task, int forced)
24827 +{
24828 +	int behind;
24829 +	TRACE_MC_TASK(task, "Completed\n");
24830 +    
24831 +	/* Logically stop the task execution */
24832 +	set_rt_flags(task, RT_F_SLEEP);
24833 +	remove_from_all(task);
24834 +
24835 +	/* Level-A tasks cannot ever get behind */
24836 +	behind = tsk_mc_crit(task) != CRIT_LEVEL_A && behind_server(task);
24837 +
24838 +	/* sched_MC BUG!! */
24839 +	/* if budget is not exhausted, make it a ghost job and requeue the job */
24840 +	
24841 +    if (!forced && !budget_exhausted(task) && !is_ghost(task)) {
24842 +		// Convert to ghost job
24843 +		tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task);
24844 +		tsk_mc_data(task)->mc_job.is_ghost = 1;
24845 +		TRACE_MC_TASK(task, "Budget is not exhausted. update_ghost_time now.\n");
24846 +		update_ghost_time(task);
24847 +		if (tsk_mc_data(task)->mc_job.ghost_budget != 0) {
24848 +			TRACE_MC_TASK(task, "Ghost budget is not exhausted and requeued as a ghost.\n");
24849 +			if (is_running(task)) {
24850 +				job_arrival(task);
24851 +				return;
24852 +			}
24853 +		}
24854 +		
24855 +		tsk_rt(task)->job_params.exec_time += budget_remaining(task);
24856 +		tsk_mc_data(task)->mc_job.is_ghost = 0;
24857 +	}
24858 +	else if (!forced && is_ghost(task)) {
24859 +		update_ghost_time(task);
24860 +		if (tsk_mc_data(task)->mc_job.ghost_budget != 0)
24861 +			TRACE_MC_TASK(task, " GHOST AGAIN!!!!\n");
24862 +	}
24863 +	
24864 +	
24865 +	//if (!forced && !is_ghost(task)) {
24866 +	if (!forced) {
24867 +		/* Task voluntarily ceased execution. Move on to next period */
24868 +        if (task->pid == signal_pid && get_exec_time(task) != 0)
24869 +            printk(KERN_DEBUG "MON/%d, %llu\n", task->rt_param.job_params.job_no, get_exec_time(task));
24870 +		task_release(task);
24871 +		sched_trace_task_completion(task, forced);
24872 +		
24873 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
24874 +		/* report to monitoring program here */
24875 +		if ((tsk_mc_crit(task) == CRIT_LEVEL_C) && (signal_pid != task->pid)
24876 +                    && (task->rt_param.job_params.job_no > START_JOB_NO)) {
24877 +            unsigned long flags;
24878 +            lt_t act_now = litmus_clock();
24879 +            lt_t virt_now = act_to_virt(get_task_rdomain(task), act_now);
24880 +
24881 +			if (task->rt_param.job_params.placeholder && (get_virt_priority_point(task) < virt_now)) {
24882 +				task->rt_param.job_params.act_priority_point = virt_to_act(get_task_rdomain(task), get_virt_priority_point(task));
24883 +				task->rt_param.job_params.placeholder = 0;
24884 +			}
24885 +			/* make report item .release, task_pid, .act_priority_point, virt_now, Level-C ready queue emptiness */
24886 +            spin_lock_irqsave(&report_queue_lock, flags);
24887 +            report_queue[atomic_read(&num_report_queue)].seq = report_seq++;
24888 +            report_queue[atomic_read(&num_report_queue)].pid = task->pid;
24889 +            report_queue[atomic_read(&num_report_queue)].job_no = task->rt_param.job_params.job_no-1;
24890 +            report_queue[atomic_read(&num_report_queue)].release_time = get_release(task);
24891 +            report_queue[atomic_read(&num_report_queue)].priority_point = get_act_priority_point(task);
24892 +            report_queue[atomic_read(&num_report_queue)].now = act_now;
24893 +            report_queue[atomic_read(&num_report_queue)].rq_empty = bheap_empty(&get_task_rdomain(task)->ready_queue);
24894 +            atomic_inc(&num_report_queue);
24895 +            //BUG_ON(atomic_read(&num_report_queue) > 2000);
24896 +            if (atomic_read(&num_report_queue) > 1000)
24897 +                atomic_set(&num_report_queue, 0);
24898 +            spin_unlock_irqrestore(&report_queue_lock, flags);
24899 +		}
24900 +#endif        
24901 +	}
24902 +
24903 +	/* If the task has no ghost budget, convert back from ghost.
24904 +	 * If the task is behind, undo ghost conversion so that it
24905 +	 * can catch up.
24906 +	 */
24907 +	if (behind || tsk_mc_data(task)->mc_job.ghost_budget == 0) {
24908 +		TRACE_MC_TASK(task, "Not a ghost task\n");
24909 +		tsk_mc_data(task)->mc_job.is_ghost = 0;
24910 +		tsk_mc_data(task)->mc_job.ghost_budget = 0;
24911 +	}
24912 +
24913 +	/* If server has run out of budget, wait until next release */
24914 +	if (budget_exhausted(task))
24915 +    {
24916 +		server_release(task);
24917 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
24918 +		if (tsk_mc_crit(task) == CRIT_LEVEL_C)
24919 +			server_release_crit_c(task);
24920 +#endif
24921 +    }
24922 +
24923 +	/* Requeue non-blocking tasks */
24924 +	if (is_running(task)) {
24925 +		job_arrival(task);
24926 +	}
24927 +}
24928 +
24929 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
24930 +void send_signal_to_monitor(void)
24931 +{
24932 +    struct siginfo info;
24933 +    struct task_struct *t;
24934 +    
24935 +    if (atomic_read(&num_report_queue) == 0)
24936 +        return;
24937 +    /* REPORT HERE .release, task_pid, .act_priority_point, virt_now, Level-C ready queue emptiness */
24938 +    info.si_signo = SIGIO;
24939 +    info.si_code = SI_QUEUE;
24940 +    info.si_int = atomic_read(&num_report_queue);
24941 +    
24942 +    t = find_task_by_vpid(signal_pid);
24943 +    
24944 +    if (t == NULL)
24945 +        printk(KERN_INFO "Registered monitoring program does not exist.\n");
24946 +    else {
24947 +        send_sig_info(SIGIO, &info, t);
24948 +    }
24949 +}
24950 +
24951 +void send_release_signal_to_monitor(void)
24952 +{
24953 +    struct siginfo info;
24954 +    struct task_struct *t;
24955 +    
24956 +    if (atomic_read(&num_release_report_queue) == 0 && atomic_read(&num_release_report_queue2))
24957 +        return;
24958 +    /* REPORT HERE .release, task_pid, .act_priority_point, virt_now, Level-C ready queue emptiness */
24959 +    info.si_signo = SIGIO;
24960 +    info.si_code = SI_QUEUE;
24961 +    info.si_int = atomic_read(&num_release_report_queue)+atomic_read(&num_release_report_queue2)+1000;
24962 +    
24963 +    t = find_task_by_vpid(signal_pid);
24964 +    
24965 +    if (t == NULL)
24966 +        printk(KERN_INFO "Registered monitoring program does not exist.\n");
24967 +    else {
24968 +        send_sig_info(SIGIO, &info, t);
24969 +    }
24970 +}
24971 +#endif
24972 +/**
24973 + * mc_ghost_exhausted() - Complete logically running ghost task.
24974 + */
24975 +#ifdef CONFIG_MERGE_TIMERS
24976 +static void mc_ghost_exhausted(struct rt_event *e)
24977 +{
24978 +	struct crit_entry *ce = container_of(e, struct crit_entry, event);
24979 +#else
24980 +static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
24981 +{
24982 +	struct crit_entry *ce = container_of(timer, struct crit_entry, timer);
24983 +#endif
24984 +
24985 +	unsigned long flags;
24986 +	struct task_struct *tmp = NULL;
24987 +
24988 +	local_irq_save(flags);
24989 +	TRACE("Ghost exhausted\n");
24990 +	TRACE_CRIT_ENTRY(ce, "Firing here\n");
24991 +
24992 +	/* Due to race conditions, we cannot just set the linked
24993 +	 * task's budget to 0 as it may no longer be the task
24994 +	 * for which this timer was armed. Instead, update the running
24995 +	 * task time and see if this causes exhaustion.
24996 +	 */
24997 +	raw_spin_lock(&crit_cpu(ce)->lock);
24998 +	if (ce->linked && is_ghost(ce->linked)) {
24999 +		update_ghost_time(ce->linked);
25000 +		if (tsk_mc_data(ce->linked)->mc_job.ghost_budget == 0) {
25001 +			tmp = ce->linked;
25002 +		}
25003 +	}
25004 +	raw_spin_unlock(&crit_cpu(ce)->lock);
25005 +
25006 +	if (tmp)
25007 +		job_completion(tmp, 0);
25008 +
25009 +	local_irq_restore(flags);
25010 +#ifndef CONFIG_MERGE_TIMERS
25011 +	return HRTIMER_NORESTART;
25012 +#endif
25013 +}
25014 +
25015 +/*
25016 + * The MC-CE common timer callback code for merged and non-merged timers.
25017 + * Returns the next time the timer should fire.
25018 + */
25019 +static lt_t __ce_timer_function(struct ce_dom_data *ce_data)
25020 +{
25021 +	struct crit_entry *ce = get_crit_entry_for(ce_data->cpu, CRIT_LEVEL_A);
25022 +	struct domain *dom = ce->domain;
25023 +	struct task_struct *old_link = NULL;
25024 +	lt_t next_timer_abs;
25025 +
25026 +	TRACE("MC level-A timer callback for CPU %d\n", ce_data->cpu);
25027 +	
25028 +
25029 +	raw_spin_lock(dom->lock);
25030 +
25031 +	raw_spin_lock(&crit_cpu(ce)->lock);
25032 +	if (ce->linked &&
25033 +	    ce->linked == ce_data->should_schedule &&
25034 +	    is_ghost(ce->linked))
25035 +	{
25036 +		old_link = ce->linked;
25037 +		tsk_mc_data(ce->linked)->mc_job.ghost_budget = 0;
25038 +		link_task_to_crit(ce, NULL);
25039 +	}
25040 +	raw_spin_unlock(&crit_cpu(ce)->lock);
25041 +
25042 +	next_timer_abs = mc_ce_timer_callback_common(dom);
25043 +
25044 +	/* Job completion will check for preemptions by means of calling job
25045 +	 * arrival if the task is not blocked */
25046 +	if (NULL != old_link) {
25047 +		STRACE("old_link " TS " so will call job completion\n", TA(old_link));
25048 +		raw_spin_unlock(dom->lock);
25049 +		job_completion(old_link, 0);
25050 +	} else {
25051 +		STRACE("old_link was null, so will call check for preempt\n");
25052 +		raw_spin_unlock(dom->lock);
25053 +		check_for_preempt(dom);
25054 +	}
25055 +	return next_timer_abs;
25056 +}
25057 +
25058 +#ifdef CONFIG_MERGE_TIMERS
25059 +static void ce_timer_function(struct rt_event *e)
25060 +{
25061 +	struct ce_dom_data *ce_data =
25062 +		container_of(e, struct ce_dom_data, event);
25063 +	unsigned long flags;
25064 +	lt_t next_timer_abs;
25065 +
25066 +	TS_LVLA_RELEASE_START;
25067 +
25068 +	local_irq_save(flags);
25069 +	next_timer_abs = __ce_timer_function(ce_data);
25070 +	add_event(per_cpu(cpus, ce_data->cpu).event_group, e, next_timer_abs);
25071 +	local_irq_restore(flags);
25072 +
25073 +	TS_LVLA_RELEASE_END;
25074 +}
25075 +#else /* else to CONFIG_MERGE_TIMERS */
25076 +static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
25077 +{
25078 +	struct ce_dom_data *ce_data =
25079 +		container_of(timer, struct ce_dom_data, timer);
25080 +	unsigned long flags;
25081 +	lt_t next_timer_abs;
25082 +
25083 +	TS_LVLA_RELEASE_START;
25084 +
25085 +	local_irq_save(flags);
25086 +	next_timer_abs = __ce_timer_function(ce_data);
25087 +	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
25088 +	local_irq_restore(flags);
25089 +
25090 +	TS_LVLA_RELEASE_END;
25091 +
25092 +	return HRTIMER_RESTART;
25093 +}
25094 +#endif /* CONFIG_MERGE_TIMERS */
25095 +
25096 +/**
25097 + * mc_release_jobs() - Add heap of tasks to the system, check for preemptions.
25098 + */
25099 +static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
25100 +{
25101 +	unsigned long flags;
25102 +    int i;
25103 +	struct task_struct *first = bheap_peek(rt->order, tasks)->value;
25104 +	struct domain *dom = get_task_domain(first);
25105 +    raw_spin_lock_irqsave(dom->lock, flags);
25106 +    TRACE(TS "Jobs released\n", TA(first));
25107 +
25108 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME    
25109 +    if (rt->level == CRIT_LEVEL_C) {
25110 +        struct bheap t_heap;
25111 +        bheap_init(&t_heap);
25112 +        
25113 +        i = atomic_read(&num_release_report_queue);
25114 +        while(!bheap_empty(tasks)) {
25115 +            struct task_struct *t;
25116 +            struct bheap_node *hn = bheap_take(rt->order, tasks);
25117 +            if (!hn)
25118 +                break;
25119 +            t = bheap2task(hn);
25120 +            if (t->pid != signal_pid && t->rt_param.job_params.job_no > START_JOB_NO) {
25121 +                release_report_queue[i].seq = release_report_seq++;
25122 +                release_report_queue[i].pid = t->pid;
25123 +                release_report_queue[i].job_no = t->rt_param.job_params.job_no;
25124 +                i++;
25125 +            }
25126 +            bheap_insert(rt->order, &t_heap, hn);
25127 +        }
25128 +        atomic_set(&num_release_report_queue, i);
25129 +        if (atomic_read(&num_release_report_queue) > 1000)
25130 +            atomic_set(&num_release_report_queue, 0);
25131 +        while(!bheap_empty(&t_heap)) {
25132 +            struct bheap_node* hn = bheap_take(rt->order, &t_heap);
25133 +            if (!hn)
25134 +                break;
25135 +            bheap_insert(rt->order, tasks, hn);
25136 +        }
25137 +    }
25138 +#endif    
25139 +	__merge_ready(rt, tasks);
25140 +    check_for_preempt(dom);
25141 +    raw_spin_unlock_irqrestore(dom->lock, flags);
25142 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME    
25143 +    if (atomic_read(&num_release_report_queue) != 0)
25144 +        send_release_signal_to_monitor();
25145 +#endif        
25146 +}
25147 +
25148 +void mc_release_crit_c_at(struct task_struct *t, lt_t start)
25149 +{
25150 +    lt_t relative_pp = t->rt_param.task_params.period
25151 +                       - (((NR_CPUS - 1) * t->rt_param.task_params.exec_cost) / (NR_CPUS));
25152 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
25153 +    if (first_task_arrival == 0) {
25154 +        first_task_arrival = start;
25155 +        _mc_crit_c_virt_timer.last_act = litmus_clock();
25156 +        _mc_crit_c_virt_timer.last_virt = 0;
25157 +        _mc_crit_c_virt_timer.speed_num = 1;
25158 +        _mc_crit_c_virt_timer.speed_denom = 1;
25159 +	}
25160 +    else
25161 +        start = first_task_arrival;
25162 +#endif        
25163 +	t->rt_param.job_params.deadline = start;
25164 +	t->rt_param.job_params.release   = t->rt_param.job_params.deadline;
25165 +	t->rt_param.job_params.real_release =  t->rt_param.job_params.release;
25166 +	t->rt_param.job_params.deadline += get_rt_period(t);
25167 +	t->rt_param.job_params.real_deadline = t->rt_param.job_params.deadline;
25168 +	t->rt_param.job_params.exec_time = 0;
25169 +	/* update job sequence number */
25170 +	t->rt_param.job_params.job_no++;
25171 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME    
25172 +    t->rt_param.job_params.placeholder = 1;
25173 +    t->rt_param.job_params.act_priority_point = 0;
25174 +    t->rt_param.job_params.virt_priority_point = act_to_virt(&_mc_crit_c_rt, t->rt_param.job_params.release) + relative_pp;
25175 +    t->rt_param.job_params.act_last_release = 0;
25176 +#endif                        
25177 +	/* don't confuse Linux */
25178 +	t->rt.time_slice = 1;
25179 +
25180 +	set_rt_flags(t, RT_F_RUNNING);
25181 +}
25182 +
25183 +/**
25184 + * mc_task_new() - Setup new mixed-criticality task.
25185 + * Assumes that there are no partitioned domains after level B.
25186 + */
25187 +static void mc_task_new(struct task_struct *t, int on_rq, int running)
25188 +{
25189 +	unsigned long flags;
25190 +	struct cpu_entry* entry;
25191 +	enum crit_level level = tsk_mc_crit(t);
25192 +
25193 +	local_irq_save(flags);
25194 +	TRACE("New mixed criticality task %d\n", t->pid);
25195 +
25196 +	/* Assign domain */
25197 +	if (level < CRIT_LEVEL_C)
25198 +		entry = &per_cpu(cpus, get_partition(t));
25199 +	else
25200 +		entry = &per_cpu(cpus, task_cpu(t));
25201 +	t->rt_param._domain = entry->crit_entries[level].domain;
25202 +
25203 +	/* Setup job params */
25204 +    if (level == CRIT_LEVEL_C) {
25205 +		t->rt_param.task_params.relative_pp = t->rt_param.task_params.period - (((NR_CPUS - 1) * t->rt_param.task_params.exec_cost) / (NR_CPUS));
25206 +        mc_release_crit_c_at(t, litmus_clock());
25207 +	}
25208 +    else
25209 +        release_at(t, litmus_clock());
25210 +	
25211 +    tsk_mc_data(t)->mc_job.ghost_budget = 0;
25212 +	tsk_mc_data(t)->mc_job.is_ghost = 0;
25213 +	if (running) {
25214 +		BUG_ON(entry->scheduled);
25215 +		entry->scheduled = t;
25216 +		tsk_rt(t)->scheduled_on = entry->cpu;
25217 +	} else {
25218 +		t->rt_param.scheduled_on = NO_CPU;
25219 +	}
25220 +	t->rt_param.linked_on = NO_CPU;
25221 +
25222 +	job_arrival(t);
25223 +
25224 +	local_irq_restore(flags);
25225 +}
25226 +
25227 +/**
25228 + * mc_task_wake_up() - Add task back into its domain and check for preemptions.
25229 + */
25230 +static void mc_task_wake_up(struct task_struct *task)
25231 +{
25232 +	unsigned long flags;
25233 +	lt_t now = litmus_clock();
25234 +	local_irq_save(flags);
25235 +
25236 +	TRACE(TS " wakes up\n", TA(task));
25237 +	if (is_tardy(task, now)) {
25238 +		/* Task missed its last release */
25239 +		release_at(task, now);
25240 +		sched_trace_task_release(task);
25241 +	}
25242 +	if (!is_ghost(task))
25243 +		job_arrival(task);
25244 +
25245 +	local_irq_restore(flags);
25246 +}
25247 +
25248 +/**
25249 + * mc_task_block() - Remove task from state to prevent it being run anywhere.
25250 + */
25251 +static void mc_task_block(struct task_struct *task)
25252 +{
25253 +	unsigned long flags;
25254 +	local_irq_save(flags);
25255 +	TRACE(TS " blocks\n", TA(task));
25256 +	remove_from_all(task);
25257 +	local_irq_restore(flags);
25258 +}
25259 +
25260 +/**
25261 + * mc_task_exit() - Remove task from the system.
25262 + */
25263 +static void mc_task_exit(struct task_struct *task)
25264 +{
25265 +	unsigned long flags;
25266 +	local_irq_save(flags);
25267 +	BUG_ON(!is_realtime(task));
25268 +	TRACE(TS " RIP\n", TA(task));
25269 +
25270 +	remove_from_all(task);
25271 +	if (tsk_rt(task)->scheduled_on != NO_CPU) {
25272 +		per_cpu(cpus, tsk_rt(task)->scheduled_on).scheduled = NULL;
25273 +		tsk_rt(task)->scheduled_on = NO_CPU;
25274 +	}
25275 +
25276 +	if (CRIT_LEVEL_A == tsk_mc_crit(task))
25277 +		mc_ce_task_exit_common(task);
25278 +
25279 +	local_irq_restore(flags);
25280 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
25281 +    atomic_dec(&ntask);
25282 +    TRACE("Remaining tasks: %d\n", atomic_read(&ntask));
25283 +    if (atomic_read(&ntask) == 0) {
25284 +        TRACE("Reset all virtual time monitoring variables.\n");
25285 +        first_task_arrival = 0;
25286 +        atomic_set(&num_report_queue, 0);
25287 +        atomic_set(&num_release_report_queue, 0);
25288 +        report_seq = 0;
25289 +        release_report_seq = 0;
25290 +        _mc_crit_c_virt_timer.last_act = litmus_clock();
25291 +        _mc_crit_c_virt_timer.last_virt = 0;
25292 +        _mc_crit_c_virt_timer.speed_num = 1;
25293 +        _mc_crit_c_virt_timer.speed_denom = 1;
25294 +    }
25295 +#endif    
25296 +}
25297 +
25298 +/**
25299 + * mc_admit_task() - Return true if the task is valid.
25300 + * Assumes there are no partitioned levels after level B.
25301 + */
25302 +static long mc_admit_task(struct task_struct* task)
25303 +{
25304 +	const enum crit_level crit = tsk_mc_crit(task);
25305 +	long ret;
25306 +	if (!tsk_mc_data(task))	{
25307 +		printk(KERN_WARNING "Tried to admit task with no criticality "
25308 +			"level\n");
25309 +		ret = -EINVAL;
25310 +		goto out;
25311 +	}
25312 +	if (crit < CRIT_LEVEL_C && get_partition(task) == NO_CPU) {
25313 +		printk(KERN_WARNING "Tried to admit partitioned task with no "
25314 +		       "partition\n");
25315 +		ret = -EINVAL;
25316 +		goto out;
25317 +	}
25318 +	if (crit == CRIT_LEVEL_A) {
25319 +		ret = mc_ce_admit_task_common(task);
25320 +		if (ret)
25321 +			goto out;
25322 +	}
25323 +	printk(KERN_INFO "Admitted task with criticality level %d\n",
25324 +		tsk_mc_crit(task));
25325 +    atomic_inc(&ntask);
25326 +	ret = 0;
25327 +out:
25328 +	return ret;
25329 +}
25330 +
25331 +/**
25332 + * mc_schedule() - Return next task which should be scheduled.
25333 + */
25334 +static struct task_struct* mc_schedule(struct task_struct* prev)
25335 +{
25336 +	unsigned long flags;
25337 +	struct domain *dom;
25338 +	struct crit_entry *ce;
25339 +	struct cpu_entry* entry = &__get_cpu_var(cpus);
25340 +	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
25341 +	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
25342 +
25343 +	local_irq_save(flags);
25344 +
25345 +	/* Litmus gave up because it couldn't access the stack of the CPU
25346 +	 * on which will_schedule was migrating from. Requeue it.
25347 +	 * This really only happens in VMs.
25348 +	 */
25349 +	if (entry->will_schedule && entry->will_schedule != prev) {
25350 +		entry->will_schedule->rt_param.scheduled_on = NO_CPU;
25351 +		low_prio_arrival(entry->will_schedule);
25352 +	}
25353 +
25354 +	raw_spin_lock(&entry->lock);
25355 +	/* Sanity checking */
25356 +
25357 +	BUG_ON(entry->scheduled && entry->scheduled != prev);
25358 +	BUG_ON(entry->scheduled && !is_realtime(prev));
25359 +	BUG_ON(is_realtime(prev) && !entry->scheduled);
25360 +
25361 +	/* Determine state */
25362 +	exists      = entry->scheduled != NULL;
25363 +	blocks      = exists && !is_running(entry->scheduled);
25364 +	out_of_time = exists &&	budget_enforced(entry->scheduled) &&
25365 +				budget_exhausted(entry->scheduled);
25366 +	sleep	    = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
25367 +	global      = exists && is_global_task(entry->scheduled);
25368 +	preempt     = entry->scheduled != entry->linked;
25369 +	lower       = exists && preempt && entry->linked &&
25370 +		tsk_mc_crit(entry->scheduled) > tsk_mc_crit(entry->linked);
25371 +
25372 +	TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d\n",
25373 +	      TA(prev), blocks, out_of_time, sleep, preempt);
25374 +	if (exists)
25375 +		prev->rt_param.scheduled_on = NO_CPU;
25376 +
25377 +	raw_spin_unlock(&entry->lock);
25378 +
25379 +
25380 +#ifdef CONFIG_PLUGIN_MC_REDIRECT
25381 +	if (smp_processor_id() == interrupt_cpu)
25382 +		fix_global_levels();
25383 +#endif
25384 +
25385 +	/* If a task blocks we have no choice but to reschedule */
25386 +	if (blocks)
25387 +		remove_from_all(entry->scheduled);
25388 +	/* Any task which exhausts its budget or sleeps waiting for its next
25389 +	 * period completes unless its execution has been forcibly stopped.
25390 +	 */
25391 +	if ((out_of_time || sleep) && !blocks)/* && !preempt)*/
25392 +		job_completion(entry->scheduled, !sleep);
25393 +	/* Global scheduled tasks must wait for a deschedule before they
25394 +	 * can rejoin the global state. Rejoin them here.
25395 +	 */
25396 +	else if (global && preempt && !blocks) {
25397 +		if (lower)
25398 +			low_prio_arrival(entry->scheduled);
25399 +		else
25400 +			job_arrival(entry->scheduled);
25401 +	}
25402 +
25403 +	/* Pick next task if none is linked */
25404 +	raw_spin_lock(&entry->lock);
25405 +	//if (entry->linked && is_realtime(entry->linked))
25406 +	//	STRACE(TS " State: linked = " TS " scheduled = " TS "\n", TA(prev), TA(entry->linked), TA(entry->scheduled));
25407 +	
25408 +	for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
25409 +		ce = &entry->crit_entries[i];
25410 +		dom = ce->domain;
25411 +
25412 +		/* Swap locks. We cannot acquire a domain lock while
25413 +		 * holding an entry lock or deadlocks will happen.
25414 +		 */
25415 +		raw_spin_unlock(&entry->lock);
25416 +		raw_spin_lock(dom->lock);
25417 +
25418 +		/* Do domain stuff before grabbing CPU locks */
25419 +		dtask = dom->peek_ready(dom);
25420 +		fix_crit_position(ce);
25421 +
25422 +		raw_spin_lock(&entry->lock);
25423 +
25424 +		if (!entry->linked && !ce->linked && dtask && can_use(ce)) {
25425 +			dom->take_ready(dom);
25426 +			link_task_to_crit(ce, dtask);
25427 +			update_crit_position(ce);
25428 +			ready_task = (is_ghost(dtask)) ? NULL : dtask;
25429 +
25430 +			/* Task found! */
25431 +			if (ready_task) {
25432 +				link_task_to_cpu(entry, ready_task);
25433 +				raw_spin_unlock(dom->lock);
25434 +				update_crit_levels(entry);
25435 +				raw_spin_lock(&entry->lock);
25436 +				continue;
25437 +			}
25438 +		}
25439 +		raw_spin_unlock(dom->lock);
25440 +	}
25441 +	
25442 +
25443 +	/* Schedule next task */
25444 +	next = entry->linked;
25445 +	if (entry->linked)
25446 +		entry->linked->rt_param.scheduled_on = entry->cpu;
25447 +	entry->will_schedule = entry->linked;
25448 +	sched_state_task_picked();
25449 +
25450 +	raw_spin_unlock(&entry->lock);
25451 +	local_irq_restore(flags);
25452 +	if (next) {
25453 +		TRACE_MC_TASK(next, "Picked this task\n");
25454 +	} else if (exists && !next) {
25455 +		;//TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock());
25456 +	}
25457 +	return next;
25458 +}
25459 +
25460 +void mc_finish_switch(struct task_struct *prev)
25461 +{
25462 +	struct cpu_entry* entry = &__get_cpu_var(cpus);
25463 +	entry->scheduled = is_realtime(current) ? current : NULL;
25464 +	TRACE_TASK(prev, "Switched away from to " TS "\n",
25465 +		   TA(entry->scheduled));
25466 +}
25467 +
25468 +/*
25469 + * This is the plugin's release at function, called by the release task-set
25470 + * system call. Other places in the file use the generic LITMUS release_at(),
25471 + * which is not this.
25472 + */
25473 +void mc_release_at(struct task_struct *ts, lt_t start)
25474 +{
25475 +	/* hack so that we can have CE timers start at the right time */
25476 +	if (CRIT_LEVEL_A == tsk_mc_crit(ts))
25477 +		mc_ce_release_at_common(ts, start);
25478 +	else
25479 +		release_at(ts, start);
25480 +}
25481 +
25482 +long mc_deactivate_plugin(void)
25483 +{
25484 +	return mc_ce_deactivate_plugin_common();
25485 +}
25486 +
25487 +/* **************************************************************************
25488 + * Initialization
25489 + * ************************************************************************** */
25490 +
25491 +/* Initialize values here so that they are allocated with the module
25492 + * and destroyed when the module is unloaded.
25493 + */
25494 +
25495 +/* LVL-A */
25496 +DEFINE_PER_CPU(struct domain_data, _mc_crit_a);
25497 +DEFINE_PER_CPU(raw_spinlock_t, _mc_crit_a_lock);
25498 +DEFINE_PER_CPU(struct ce_dom_data, _mc_crit_a_ce_data);
25499 +/* LVL-B */
25500 +DEFINE_PER_CPU(struct domain_data, _mc_crit_b);
25501 +DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt);
25502 +/* LVL-C */
25503 +static struct domain_data _mc_crit_c;
25504 +struct bheap _mc_heap_c;
25505 +struct bheap_node _mc_nodes_c[NR_CPUS];
25506 +
25507 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
25508 +void mc_set_monitor_pid(pid_t pid)
25509 +{
25510 +	printk(KERN_INFO "Registering a monitor program pid: %d\n", pid);
25511 +	signal_pid = pid;
25512 +}	
25513 +#endif
25514 +	
25515 +static long mc_activate_plugin(void)
25516 +{
25517 +	struct domain_data *dom_data;
25518 +	struct domain *dom;
25519 +	struct domain_data *our_domains[NR_CPUS];
25520 +	int cpu, n = 0;
25521 +	long ret;
25522 +
25523 +#ifdef CONFIG_RELEASE_MASTER
25524 +	interrupt_cpu = atomic_read(&release_master_cpu);
25525 +#if defined(CONFIG_PLUGIN_MC_REDIRECT) || defined(CONFIG_PLUGIN_MC_RELEASE_MASTER)
25526 +	if (NO_CPU == interrupt_cpu) {
25527 +		printk(KERN_ERR "LITMUS-MC: need a release master\n");
25528 +		ret = -EINVAL;
25529 +		goto out;
25530 +	}
25531 +#endif
25532 +#endif
25533 +
25534 +	for_each_online_cpu(cpu) {
25535 +		BUG_ON(NR_CPUS <= n);
25536 +		dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain;
25537 +		dom_data = domain_data(dom);
25538 +		our_domains[cpu] = dom_data;
25539 +#if defined(CONFIG_MERGE_TIMERS) && defined(CONFIG_PLUGIN_MC_RELEASE_MASTER)
25540 +		per_cpu(cpus, cpu).event_group =
25541 +			get_event_group_for(interrupt_cpu);
25542 +#elif defined(CONFIG_MERGE_TIMERS) && !defined(CONFIG_PLUGIN_MC_RELEASE_MASTER)
25543 +		per_cpu(cpus, cpu).event_group = get_event_group_for(cpu);
25544 +#endif
25545 +		n++;
25546 +	}
25547 +	ret = mc_ce_set_domains(n, our_domains);
25548 +	if (ret)
25549 +		goto out;
25550 +	ret = mc_ce_activate_plugin_common();
25551 +out:
25552 +	return ret;
25553 +}
25554 +
25555 +static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
25556 +	.plugin_name		= "MC",
25557 +	.task_new		= mc_task_new,
25558 +	.complete_job		= complete_job,
25559 +	.task_exit		= mc_task_exit,
25560 +	.schedule		= mc_schedule,
25561 +	.task_wake_up		= mc_task_wake_up,
25562 +	.task_block		= mc_task_block,
25563 +	.admit_task		= mc_admit_task,
25564 +	.activate_plugin	= mc_activate_plugin,
25565 +	.release_at		= mc_release_at,
25566 +	.deactivate_plugin	= mc_deactivate_plugin,
25567 +	.finish_switch		= mc_finish_switch,
25568 +};
25569 +
25570 +static void init_crit_entry(struct crit_entry *ce, enum crit_level level,
25571 +			    struct domain_data *dom_data,
25572 +			    struct bheap_node *node)
25573 +{
25574 +	ce->level  = level;
25575 +	ce->linked = NULL;
25576 +	ce->node   = node;
25577 +	ce->domain = &dom_data->domain;
25578 +	ce->state  = CS_ACTIVE;
25579 +#ifdef CONFIG_MERGE_TIMERS
25580 +	init_event(&ce->event, level, mc_ghost_exhausted,
25581 +		   event_list_alloc(GFP_ATOMIC));
25582 +#else
25583 +	hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
25584 +	ce->timer.function = mc_ghost_exhausted;
25585 +#endif
25586 +
25587 +}
25588 +
25589 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
25590 +static void init_virt_timer(rt_domain_t *rt, int num, int denom)
25591 +{
25592 +    struct virt_timer *vt;
25593 +    
25594 +    rt->virt_timer = &_mc_crit_c_virt_timer;
25595 +	_mc_crit_c_virt_timer.rt = rt;
25596 +    vt = rt->virt_timer;
25597 +    vt->last_act = litmus_clock();
25598 +    vt->last_virt = 0;
25599 +	vt->speed_num = num;
25600 +    vt->speed_denom = denom;
25601 +}
25602 +    
25603 +void mc_change_speed(int num, int  denom)
25604 +{
25605 +	struct virt_timer *vt = &_mc_crit_c_virt_timer;
25606 +	lt_t act = litmus_clock();
25607 +	lt_t virt = act_to_virt(vt->rt, act);
25608 +	struct bheap temp_queue;
25609 +	struct task_struct *t;
25610 +	int cpu, i, prev_num, prev_denom;
25611 +	lt_t prev_last_act, prev_last_virt, prev_virt_release_time;
25612 +    unsigned long flags;
25613 +	
25614 +	bheap_init(&temp_queue);
25615 +
25616 +	/* run queue */
25617 +	raw_spin_lock_irqsave(&vt->rt->ready_lock, flags);
25618 +	BUG_ON(vt->rt->level != CRIT_LEVEL_C);
25619 +	while(!bheap_empty(&vt->rt->ready_queue)) {
25620 +		struct bheap_node* hn = bheap_take(vt->rt->order, &vt->rt->ready_queue);
25621 +		if (!hn)
25622 +			break;
25623 +		t = bheap2task(hn);
25624 +		TRACE_MC_TASK(t, " virt_pp: %llu, placeholder: %d\n", t->rt_param.job_params.virt_priority_point, t->rt_param.job_params.placeholder);
25625 +		if (t->rt_param.job_params.placeholder == 1 && t->rt_param.job_params.virt_priority_point < virt) {
25626 +			t->rt_param.job_params.placeholder = 0;
25627 +			t->rt_param.job_params.act_priority_point = virt_to_act(vt->rt, t->rt_param.job_params.virt_priority_point);
25628 +		}
25629 +		bheap_insert(vt->rt->order, &temp_queue, hn);
25630 +	}
25631 +	while(!bheap_empty(&temp_queue)) {
25632 +		struct bheap_node* hn = bheap_take(vt->rt->order, &temp_queue);
25633 +		if (!hn)
25634 +			break;
25635 +		bheap_insert(vt->rt->order, &vt->rt->ready_queue, hn);
25636 +	}
25637 +	raw_spin_unlock_irqrestore(&vt->rt->ready_lock, flags);
25638 +
25639 +	/* get running tasks */
25640 +	for_each_online_cpu(cpu) {
25641 +		struct cpu_entry* entry = &per_cpu(cpus, cpu);
25642 +		raw_spin_lock_irqsave(&entry->lock, flags);
25643 +		
25644 +		if (entry->scheduled && 
25645 +				(entry->scheduled->rt_param.job_params.virt_priority_point < virt) && 
25646 +				(tsk_mc_crit(entry->scheduled) == CRIT_LEVEL_C) &&
25647 +				(entry->scheduled->rt_param.job_params.placeholder == 1)) {
25648 +			entry->scheduled->rt_param.job_params.placeholder = 0;
25649 +			entry->scheduled->rt_param.job_params.act_priority_point = virt_to_act(vt->rt, entry->scheduled->rt_param.job_params.virt_priority_point);
25650 +		}
25651 +		
25652 +		raw_spin_unlock_irqrestore(&entry->lock, flags);
25653 +	}
25654 +	
25655 +	raw_spin_lock(&_mc_crit_c_virt_timer.lock);
25656 +	prev_last_act = _mc_crit_c_virt_timer.last_act;
25657 +	prev_last_virt = _mc_crit_c_virt_timer.last_virt;
25658 +	prev_num = _mc_crit_c_virt_timer.speed_num;
25659 +	prev_denom = _mc_crit_c_virt_timer.speed_denom;
25660 +	_mc_crit_c_virt_timer.last_act = act;
25661 +	_mc_crit_c_virt_timer.last_virt = virt;
25662 +	_mc_crit_c_virt_timer.speed_num = num;
25663 +	_mc_crit_c_virt_timer.speed_denom = denom;
25664 +	raw_spin_unlock(&_mc_crit_c_virt_timer.lock);
25665 +    
25666 +	raw_spin_lock_irqsave(&vt->rt->release_lock, flags);
25667 +	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) {
25668 +		struct list_head *pos;
25669 +		struct release_heap *rh;
25670 +		struct bheap *heap;
25671 +		
25672 +		pos = vt->rt->release_queue.slot[i].next;
25673 +		list_for_each(pos, &vt->rt->release_queue.slot[i]) {
25674 +			rh = list_entry(pos, struct release_heap, list);
25675 +			heap = &rh->heap;
25676 +			/* change release_time and cancel timer and set timer with modified release_time */
25677 +			hrtimer_cancel(&rh->timer);
25678 +			prev_virt_release_time = prev_last_virt + (rh->release_time - prev_last_act)*prev_num/prev_denom;
25679 +			rh->release_time = virt_to_act(vt->rt, prev_virt_release_time);
25680 +			__hrtimer_start_range_ns(&rh->timer, ns_to_ktime(rh->release_time), 0, HRTIMER_MODE_ABS_PINNED, 0);
25681 +			BUG_ON(!bheap_empty(&temp_queue));
25682 +			while(!bheap_empty(heap)) {
25683 +				struct task_struct *t;
25684 +				struct bheap_node *hn = bheap_take(vt->rt->order, heap);
25685 +				lt_t prev_v_release_time;
25686 +				if (!hn)
25687 +					break;
25688 +				t = bheap2task(hn);
25689 +				/* change release time and deadline */
25690 +				prev_v_release_time = prev_last_virt + (t->rt_param.job_params.release - prev_last_act)*prev_num/prev_denom;
25691 +				t->rt_param.job_params.release = virt_to_act(vt->rt, prev_v_release_time);
25692 +				t->rt_param.job_params.deadline = t->rt_param.job_params.release + get_rt_period(t);
25693 +				t->rt_param.job_params.virt_priority_point = act_to_virt(vt->rt, t->rt_param.job_params.release) + t->rt_param.task_params.relative_pp;
25694 +				bheap_insert(vt->rt->order, &temp_queue, hn);
25695 +			}
25696 +			while(!bheap_empty(&temp_queue)) {
25697 +				struct bheap_node* hn = bheap_take(vt->rt->order, &temp_queue);
25698 +				if (!hn)
25699 +					break;
25700 +				bheap_insert(vt->rt->order, heap, hn);
25701 +			}
25702 +		}
25703 +	}
25704 +	raw_spin_unlock_irqrestore(&vt->rt->release_lock, flags);
25705 +}
25706 +#endif
25707 +
25708 +static void init_local_domain(struct cpu_entry *entry, struct domain_data *dom_data,
25709 +			      enum crit_level level)
25710 +{
25711 +	dom_data->heap = NULL;
25712 +	dom_data->crit_entry = &entry->crit_entries[level];
25713 +	init_crit_entry(dom_data->crit_entry, level, dom_data, NULL);
25714 +}
25715 +
25716 +static void init_global_domain(struct domain_data *dom_data, enum crit_level level,
25717 +			       struct bheap *heap, struct bheap_node *nodes)
25718 +{
25719 +	int cpu;
25720 +	struct cpu_entry *entry;
25721 +	struct crit_entry *ce;
25722 +	struct bheap_node *node;
25723 +
25724 +	dom_data->crit_entry = NULL;
25725 +	dom_data->heap = heap;
25726 +	bheap_init(heap);
25727 +
25728 +	for_each_online_cpu(cpu) {
25729 +		entry = &per_cpu(cpus, cpu);
25730 +		node = &nodes[cpu];
25731 +		ce = &entry->crit_entries[level];
25732 +		init_crit_entry(ce, level, dom_data, node);
25733 +		bheap_node_init(&ce->node, ce);
25734 +		bheap_insert(cpu_lower_prio, heap, node);
25735 +	}
25736 +}
25737 +
25738 +static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
25739 +				   enum crit_level prio, int is_partitioned, int cpu)
25740 +{
25741 +    if (prio == CRIT_LEVEL_C) {
25742 +        pd_domain_init(dom, rt, gel_ready_order, NULL,
25743 +                   mc_release_jobs, mc_preempt_needed,
25744 +                   gel_higher_prio);
25745 +    }
25746 +    else if (prio == CRIT_LEVEL_B) {
25747 +        pd_domain_init(dom, rt, edf_ready_order, NULL,
25748 +                   mc_release_jobs, mc_preempt_needed,
25749 +                   edf_higher_prio);
25750 +    }
25751 +    rt->level = prio;
25752 +#if defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
25753 +	/* All timers are on one CPU and release-master is using the event
25754 +	 * merging interface as well. */
25755 +	BUG_ON(NO_CPU == interrupt_cpu);
25756 +	rt->event_group = get_event_group_for(interrupt_cpu);
25757 +	rt->prio = prio;
25758 +#elif defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && !defined(CONFIG_MERGE_TIMERS)
25759 +	/* Using release master, but not merging timers. */
25760 +	rt->release_master = interrupt_cpu;
25761 +#elif !defined(CONFIG_PLUGIN_MC_RELEASE_MASTER) && defined(CONFIG_MERGE_TIMERS)
25762 +	/* Merge the timers, but don't move them to the release master. */
25763 +	if (is_partitioned) {
25764 +		rt->event_group = get_event_group_for(cpu);
25765 +	} else {
25766 +		/* Global timers will be added to the event group of the CPU
25767 +		 * the code is executing on when add_event() is called.
25768 +		 */
25769 +		rt->event_group = NULL;
25770 +	}
25771 +	rt->prio = prio;
25772 +#endif
25773 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME
25774 +    if (prio == CRIT_LEVEL_C) {
25775 +        init_virt_timer(rt, 1, 1);
25776 +    }
25777 +#endif    
25778 +}
25779 +
25780 +struct domain_data *ce_domain_for(int);
25781 +static int __init init_mc(void)
25782 +{
25783 +	int cpu;
25784 +	struct cpu_entry *entry;
25785 +	struct domain_data *dom_data;
25786 +	rt_domain_t *rt;
25787 +	raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
25788 +	struct ce_dom_data *ce_data;
25789 +
25790 +	for_each_online_cpu(cpu) {
25791 +		entry = &per_cpu(cpus, cpu);
25792 +
25793 +		/* CPU */
25794 +		entry->cpu = cpu;
25795 +		entry->scheduled = NULL;
25796 +		entry->linked = NULL;
25797 +
25798 +		raw_spin_lock_init(&entry->lock);
25799 +
25800 +#ifdef CONFIG_PLUGIN_MC_REDIRECT
25801 +		raw_spin_lock_init(&entry->redir_lock);
25802 +		INIT_LIST_HEAD(&entry->redir);
25803 +#endif
25804 +
25805 +		/* CRIT_LEVEL_A */
25806 +		dom_data = &per_cpu(_mc_crit_a, cpu);
25807 +		ce_data = &per_cpu(_mc_crit_a_ce_data, cpu);
25808 +		a_dom_lock = &per_cpu(_mc_crit_a_lock, cpu);
25809 +		raw_spin_lock_init(a_dom_lock);
25810 +		ce_domain_init(&dom_data->domain,
25811 +				a_dom_lock, ce_requeue, ce_peek_and_take_ready,
25812 +				ce_peek_and_take_ready, mc_preempt_needed,
25813 +				ce_higher_prio, ce_data, cpu,
25814 +				ce_timer_function);
25815 +		init_local_domain(entry, dom_data, CRIT_LEVEL_A);
25816 +		dom_data->domain.name = "LVL-A";
25817 +
25818 +		/* CRIT_LEVEL_B */
25819 +		dom_data = &per_cpu(_mc_crit_b, cpu);
25820 +		rt = &per_cpu(_mc_crit_b_rt, cpu);
25821 +		init_local_domain(entry, dom_data, CRIT_LEVEL_B);
25822 +		init_edf_domain(&dom_data->domain, rt, CRIT_LEVEL_B, 1, cpu);
25823 +		b_dom_lock = dom_data->domain.lock;
25824 +		raw_spin_lock_init(b_dom_lock);
25825 +		dom_data->domain.name = "LVL-B";
25826 +	}
25827 +
25828 +	/* CRIT_LEVEL_C */
25829 +	init_global_domain(&_mc_crit_c, CRIT_LEVEL_C,
25830 +			   &_mc_heap_c, _mc_nodes_c);
25831 +	init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, CRIT_LEVEL_C,
25832 +			0, NO_CPU);
25833 +	c_dom_lock = _mc_crit_c.domain.lock;
25834 +	raw_spin_lock_init(c_dom_lock);
25835 +
25836 +#ifdef CONFIG_PLUGIN_VIRTUAL_TIME	
25837 +    raw_spin_lock_init(&_mc_crit_c_virt_timer.lock);
25838 +    spin_lock_init(&report_queue_lock);
25839 +    atomic_set(&num_report_queue, 0);
25840 +    atomic_set(&num_release_report_queue, 0);
25841 +    atomic_set(&num_release_report_queue2, 0);
25842 +    atomic_set(&ntask, 0);
25843 +    atomic_set(&pend_release_signal, 0);
25844 +    release_report_queue = kmalloc(sizeof(struct report_data)*1024, GFP_ATOMIC);
25845 +    release_report_queue2 = kmalloc(sizeof(struct report_data)*1024, GFP_ATOMIC);
25846 +    report_queue = kmalloc(sizeof(struct report_data)*1024, GFP_ATOMIC);
25847 +#endif    
25848 +	_mc_crit_c.domain.name = "LVL-C";
25849 +
25850 +	return register_sched_plugin(&mc_plugin);
25851 +}
25852 +
25853 +module_init(init_mc);
25854 diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
25855 new file mode 100644
25856 index 0000000..af02dfd
25857 --- /dev/null
25858 +++ b/litmus/sched_mc_ce.c
25859 @@ -0,0 +1,1052 @@
25860 +/**
25861 + * litmus/sched_mc_ce.c
25862 + *
25863 + * The Cyclic Executive (CE) scheduler used by the mixed criticality scheduling
25864 + * algorithm.
25865 + */
25866 +
25867 +#include <asm/atomic.h>
25868 +#include <asm/uaccess.h>
25869 +
25870 +#include <linux/module.h>
25871 +#include <linux/percpu.h>
25872 +#include <linux/hrtimer.h>
25873 +#include <linux/pid.h>
25874 +#include <linux/sched.h>
25875 +#include <linux/proc_fs.h>
25876 +
25877 +#include <litmus/litmus.h>
25878 +#include <litmus/sched_plugin.h>
25879 +#include <litmus/rt_domain.h>
25880 +#include <litmus/rt_param.h>
25881 +#include <litmus/litmus_proc.h>
25882 +#include <litmus/sched_trace.h>
25883 +#include <litmus/jobs.h>
25884 +#include <litmus/sched_mc.h>
25885 +#include <litmus/ce_domain.h>
25886 +
25887 +static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
25888 +
25889 +#define using_linux_plugin() (litmus == &linux_sched_plugin)
25890 +
25891 +/* get a reference to struct domain for a CPU */
25892 +#define get_domain_for(cpu) (&per_cpu(domains, cpu)->domain)
25893 +
25894 +#define get_pid_table(cpu) (&per_cpu(ce_pid_table, cpu))
25895 +#define get_pid_entry(cpu, idx) (&(get_pid_table(cpu)->entries[idx]))
25896 +
25897 +static atomic_t start_time_set = ATOMIC_INIT(-1);
25898 +static atomic64_t start_time = ATOMIC64_INIT(0);
25899 +static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
25900 +
25901 +/*
25902 + * Cache the budget along with the struct PID for a task so that we don't need
25903 + * to fetch its task_struct every time we check to see what should be
25904 + * scheduled.
25905 + */
25906 +struct ce_pid_entry {
25907 +	struct pid *pid;
25908 +	lt_t budget;
25909 +	/* accumulated (summed) budgets, including this one */
25910 +	lt_t acc_time;
25911 +	unsigned int expected_job;
25912 +};
25913 +
25914 +/*
25915 + * Each CPU needs a mapping of level A ID (integer) to struct pid so that we
25916 + * can get its task struct.
25917 + */
25918 +struct ce_pid_table {
25919 +	struct ce_pid_entry entries[CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS];
25920 +	int num_pid_entries;
25921 +	lt_t cycle_time;
25922 +};
25923 +
25924 +DEFINE_PER_CPU(struct ce_pid_table, ce_pid_table);
25925 +
25926 +/*
25927 + * How we get the domain for a given CPU locally. Set with the
25928 + * mc_ce_set_domains function. Must be done before activating plugins. Be
25929 + * careful when using domains as a variable elsewhere in this file.
25930 + */
25931 +
25932 +DEFINE_PER_CPU(struct domain_data*, domains);
25933 +
25934 +/*
25935 + * The domains and other data used by the MC-CE plugin when it runs alone.
25936 + */
25937 +DEFINE_PER_CPU(struct domain_data, _mc_ce_doms);
25938 +DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
25939 +DEFINE_PER_CPU(raw_spinlock_t, _mc_ce_dom_locks);
25940 +
25941 +#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
25942 +static int interrupt_cpu;
25943 +#endif
25944 +
25945 +long mc_ce_set_domains(const int n, struct domain_data *domains_in[])
25946 +{
25947 +	const int max = (NR_CPUS < n) ? NR_CPUS : n;
25948 +	struct domain_data *new_dom = NULL;
25949 +	int i, ret;
25950 +	if (!using_linux_plugin()) {
25951 +		printk(KERN_WARNING "can't set MC-CE domains when not using "
25952 +				"Linux scheduler.\n");
25953 +		ret = -EINVAL;
25954 +		goto out;
25955 +	}
25956 +	for (i = 0; i < max; ++i) {
25957 +		new_dom = domains_in[i];
25958 +		per_cpu(domains, i) = new_dom;
25959 +	}
25960 +	ret = 0;
25961 +out:
25962 +	return ret;
25963 +}
25964 +
25965 +unsigned int mc_ce_get_expected_job(const int cpu, const int idx)
25966 +{
25967 +	const struct ce_pid_table *pid_table = get_pid_table(cpu);
25968 +	BUG_ON(0 > cpu);
25969 +	BUG_ON(0 > idx);
25970 +	BUG_ON(pid_table->num_pid_entries <= idx);
25971 +	return pid_table->entries[idx].expected_job;
25972 +}
25973 +
25974 +/*
25975 + * Get the offset into the cycle taking the start time into account.
25976 + */
25977 +static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
25978 +{
25979 +	long long st = atomic64_read(&start_time);
25980 +	lt_t offset = (when - st) % cycle_time;
25981 +	TRACE("when: %llu  cycle_time: %llu start_time: %lld  offset %llu\n",
25982 +			when, cycle_time, st, offset);
25983 +	return offset;
25984 +}
25985 +
25986 +/*
25987 + * The user land job completion call will set the RT_F_SLEEP flag and then
25988 + * call schedule. This function is used when schedule sleeps a task.
25989 + *
25990 + * Do not call prepare_for_next_period on Level-A tasks!
25991 + */
25992 +static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
25993 +{
25994 +	const int cpu = task_cpu(ts);
25995 +	const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id;
25996 +	const struct ce_pid_entry *pid_entry = get_pid_entry(cpu, idx);
25997 +	unsigned int just_finished;
25998 +
25999 +	TRACE_TASK(ts, "Completed\n");
26000 +
26001 +	sched_trace_task_completion(ts, 0);
26002 +	/* post-increment is important here */
26003 +	just_finished = (tsk_rt(ts)->job_params.job_no)++;
26004 +
26005 +	/* Job completes in expected window: everything is normal.
26006 +	 * Job completes in an earlier window: BUG(), that's wrong.
26007 +	 * Job completes in a later window: The job is behind.
26008 +	 */
26009 +	if (just_finished < pid_entry->expected_job) {
26010 +		/* this job is already released because it's running behind */
26011 +		set_rt_flags(ts, RT_F_RUNNING);
26012 +		TRACE_TASK(ts, "appears behind: the expected job is %u but "
26013 +				"job %u just completed\n",
26014 +				pid_entry->expected_job, just_finished);
26015 +	} else if (pid_entry->expected_job < just_finished) {
26016 +		printk(KERN_CRIT "job %u completed in expected job %u which "
26017 +				"seems too early\n", just_finished,
26018 +				pid_entry->expected_job);
26019 +		BUG();
26020 +	}
26021 +}
26022 +
26023 +
26024 +/*
26025 + * Return the index into the PID entries table of what to schedule next.
26026 + * Don't call if the table is empty. Assumes the caller has the domain lock.
26027 + * The offset parameter is the offset into the cycle.
26028 + *
26029 + * TODO Currently O(n) in the number of tasks on the CPU. Binary search?
26030 + */
26031 +static int mc_ce_schedule_at(const struct domain *dom, lt_t offset)
26032 +{
26033 +	const struct ce_dom_data *ce_data = dom->data;
26034 +	struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
26035 +	const struct ce_pid_entry *pid_entry = NULL;
26036 +	int idx;
26037 +
26038 +	BUG_ON(pid_table->cycle_time < 1);
26039 +	BUG_ON(pid_table->num_pid_entries < 1);
26040 +
26041 +	for (idx = 0; idx < pid_table->num_pid_entries; ++idx) {
26042 +		pid_entry = &pid_table->entries[idx];
26043 +		if (offset < pid_entry->acc_time) {
26044 +			/* found task to schedule in this window */
26045 +			break;
26046 +		}
26047 +	}
26048 +	/* can only happen if cycle_time is not right */
26049 +	BUG_ON(pid_entry->acc_time > pid_table->cycle_time);
26050 +	TRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu);
26051 +	return idx;
26052 +}
26053 +
26054 +static struct task_struct *mc_ce_schedule(struct task_struct *prev)
26055 +{
26056 +	struct domain *dom = get_domain_for(smp_processor_id());
26057 +	struct ce_dom_data *ce_data = dom->data;
26058 +	struct task_struct *next = NULL;
26059 +	int exists, sleep, should_sched_exists, should_sched_blocked,
26060 +	    should_sched_asleep;
26061 +
26062 +	raw_spin_lock(dom->lock);
26063 +
26064 +	/* sanity checking */
26065 +	BUG_ON(ce_data->scheduled && ce_data->scheduled != prev);
26066 +	BUG_ON(ce_data->scheduled && !is_realtime(prev));
26067 +	BUG_ON(is_realtime(prev) && !ce_data->scheduled);
26068 +
26069 +	exists = NULL != ce_data->scheduled;
26070 +	sleep = exists && RT_F_SLEEP == get_rt_flags(ce_data->scheduled);
26071 +
26072 +	TRACE("exists: %d, sleep: %d\n", exists, sleep);
26073 +
26074 +	if (sleep)
26075 +		mc_ce_job_completion(dom, ce_data->scheduled);
26076 +
26077 +	/* these checks must go after the call to mc_ce_job_completion in case
26078 +	 * a late task needs to be scheduled again right away and it's the only
26079 +	 * task on a core
26080 +	 */
26081 +	should_sched_exists = NULL != ce_data->should_schedule;
26082 +	should_sched_blocked = should_sched_exists &&
26083 +		!is_running(ce_data->should_schedule);
26084 +	should_sched_asleep = should_sched_exists &&
26085 +		RT_F_SLEEP == get_rt_flags(ce_data->should_schedule);
26086 +
26087 +	TRACE("should_sched_exists: %d, should_sched_blocked: %d, "
26088 +			"should_sched_asleep: %d\n", should_sched_exists,
26089 +			should_sched_blocked, should_sched_asleep);
26090 +
26091 +	if (should_sched_exists && !should_sched_blocked &&
26092 +			!should_sched_asleep) {
26093 +		/*
26094 +		 * schedule the task that should be executing in the cyclic
26095 +		 * schedule if it is not blocked and not sleeping
26096 +		 */
26097 +		next = ce_data->should_schedule;
26098 +	}
26099 +	sched_state_task_picked();
26100 +	raw_spin_unlock(dom->lock);
26101 +	return next;
26102 +}
26103 +
26104 +static void mc_ce_finish_switch(struct task_struct *prev)
26105 +{
26106 +	struct domain *dom = get_domain_for(smp_processor_id());
26107 +	struct ce_dom_data *ce_data = dom->data;
26108 +
26109 +	TRACE("finish switch\n");
26110 +
26111 +	if (is_realtime(current) && CRIT_LEVEL_A == tsk_mc_crit(current))
26112 +		ce_data->scheduled = current;
26113 +	else
26114 +		ce_data->scheduled = NULL;
26115 +}
26116 +
26117 +/*
26118 + * Admit task called to see if this task is permitted to enter the system.
26119 + * Here we look up the task's PID structure and save it in the proper slot on
26120 + * the CPU this task will run on.
26121 + */
26122 +long mc_ce_admit_task_common(struct task_struct *ts)
26123 +{
26124 +	struct domain *dom = get_domain_for(get_partition(ts));
26125 +	struct ce_dom_data *ce_data = dom->data;
26126 +	struct mc_data *mcd = tsk_mc_data(ts);
26127 +	struct pid *pid = NULL;
26128 +	long retval = -EINVAL;
26129 +	const int lvl_a_id = mcd->mc_task.lvl_a_id;
26130 +	struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
26131 +
26132 +	BUG_ON(get_partition(ts) != ce_data->cpu);
26133 +
26134 +	/* check the task has migrated to the right CPU (like in sched_cedf) */
26135 +	if (task_cpu(ts) != get_partition(ts)) {
26136 +		printk(KERN_INFO "litmus: %d admitted on CPU %d but want %d ",
26137 +				ts->pid, task_cpu(ts), get_partition(ts));
26138 +		goto out;
26139 +	}
26140 +
26141 +	/* only level A tasks can be CE */
26142 +	if (!mcd || CRIT_LEVEL_A != tsk_mc_crit(ts)) {
26143 +		printk(KERN_INFO "litmus: non-MC or non level A task %d\n",
26144 +				ts->pid);
26145 +		goto out;
26146 +	}
26147 +
26148 +	/* try and get the task's PID structure */
26149 +	pid = get_task_pid(ts, PIDTYPE_PID);
26150 +	if (IS_ERR_OR_NULL(pid)) {
26151 +		printk(KERN_INFO "litmus: couldn't get pid struct for %d\n",
26152 +				ts->pid);
26153 +		goto out;
26154 +	}
26155 +
26156 +	if (lvl_a_id >= pid_table->num_pid_entries) {
26157 +		printk(KERN_INFO "litmus: level A id greater than expected "
26158 +				"number of tasks %d for %d cpu %d\n",
26159 +				pid_table->num_pid_entries, ts->pid,
26160 +				get_partition(ts));
26161 +		goto out_put_pid;
26162 +	}
26163 +	if (pid_table->entries[lvl_a_id].pid) {
26164 +		printk(KERN_INFO "litmus: have saved pid info id: %d cpu: %d\n",
26165 +				lvl_a_id, get_partition(ts));
26166 +		goto out_put_pid;
26167 +	}
26168 +	if (get_exec_cost(ts) >= pid_table->entries[lvl_a_id].budget) {
26169 +		printk(KERN_INFO "litmus: execution cost %llu is larger than "
26170 +				"the budget %llu\n",
26171 +				get_exec_cost(ts),
26172 +				pid_table->entries[lvl_a_id].budget);
26173 +		goto out_put_pid;
26174 +	}
26175 +	pid_table->entries[lvl_a_id].pid = pid;
26176 +	retval = 0;
26177 +	/* don't call put_pid if we are successful */
26178 +	goto out;
26179 +
26180 +out_put_pid:
26181 +	put_pid(pid);
26182 +out:
26183 +	return retval;
26184 +}
26185 +
26186 +static long mc_ce_admit_task(struct task_struct *ts)
26187 +{
26188 +	struct domain *dom = get_domain_for(get_partition(ts));
26189 +	unsigned long flags, retval;
26190 +	raw_spin_lock_irqsave(dom->lock, flags);
26191 +	retval = mc_ce_admit_task_common(ts);
26192 +	raw_spin_unlock_irqrestore(dom->lock, flags);
26193 +	return retval;
26194 +}
26195 +
26196 +/*
26197 + * Called to set up a new real-time task (after the admit_task callback).
26198 + * At this point the task's struct PID is already hooked up on the destination
26199 + * CPU. The task may already be running.
26200 + */
26201 +static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
26202 +{
26203 +	const int cpu = task_cpu(ts);
26204 +	struct domain *dom = get_domain_for(cpu);
26205 +	struct ce_dom_data *ce_data = dom->data;
26206 +	struct ce_pid_table *pid_table = get_pid_table(cpu);
26207 +	struct pid *pid_should_be_running;
26208 +	struct ce_pid_entry *pid_entry;
26209 +	unsigned long flags;
26210 +	int idx, should_be_running;
26211 +	lt_t offset;
26212 +
26213 +	raw_spin_lock_irqsave(dom->lock, flags);
26214 +	pid_entry = get_pid_entry(cpu, tsk_mc_data(ts)->mc_task.lvl_a_id);
26215 +	/* initialize some task state */
26216 +	set_rt_flags(ts, RT_F_RUNNING);
26217 +
26218 +	/* have to call mc_ce_schedule_at because the task only gets a PID
26219 +	 * entry after calling admit_task */
26220 +	offset = get_cycle_offset(litmus_clock(), pid_table->cycle_time);
26221 +	idx = mc_ce_schedule_at(dom, offset);
26222 +	pid_should_be_running = get_pid_entry(cpu, idx)->pid;
26223 +	rcu_read_lock();
26224 +	should_be_running = (ts == pid_task(pid_should_be_running, PIDTYPE_PID));
26225 +	rcu_read_unlock();
26226 +	if (running) {
26227 +		/* admit task checks that the task is not on the wrong CPU */
26228 +		BUG_ON(task_cpu(ts) != get_partition(ts));
26229 +		BUG_ON(ce_data->scheduled);
26230 +		ce_data->scheduled = ts;
26231 +
26232 +		if (should_be_running)
26233 +			ce_data->should_schedule = ts;
26234 +		else
26235 +			preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
26236 +	} else if (!running && should_be_running) {
26237 +		ce_data->should_schedule = ts;
26238 +		preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
26239 +	}
26240 +	raw_spin_unlock_irqrestore(dom->lock, flags);
26241 +}
26242 +
26243 +/*
26244 + * Called to re-introduce a task after blocking.
26245 + * Can potentially be called multiple times.
26246 + */
26247 +static void mc_ce_task_wake_up(struct task_struct *ts)
26248 +{
26249 +	struct domain *dom = get_domain_for(get_partition(ts));
26250 +	struct ce_dom_data *ce_data = dom->data;
26251 +	unsigned long flags;
26252 +
26253 +	TRACE_TASK(ts, "wake up\n");
26254 +
26255 +	raw_spin_lock_irqsave(dom->lock, flags);
26256 +	if (ts == ce_data->should_schedule && ts != ce_data->scheduled)
26257 +		preempt_if_preemptable(ts, ce_data->cpu);
26258 +	raw_spin_unlock_irqrestore(dom->lock, flags);
26259 +}
26260 +
26261 +/*
26262 + * Called to notify the plugin of a blocking real-time tasks. Only called for
26263 + * real-time tasks and before schedule is called.
26264 + */
26265 +static void mc_ce_task_block(struct task_struct *ts)
26266 +{
26267 +	/* nothing to do because it will be taken care of in schedule */
26268 +	TRACE_TASK(ts, "blocked\n");
26269 +}
26270 +
26271 +/*
26272 + * Called when a task switches from RT mode back to normal mode.
26273 + */
26274 +void mc_ce_task_exit_common(struct task_struct *ts)
26275 +{
26276 +	struct domain *dom = get_domain_for(get_partition(ts));
26277 +	struct ce_dom_data *ce_data = dom->data;
26278 +	unsigned long flags;
26279 +	struct pid *pid;
26280 +	const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;
26281 +	struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
26282 +
26283 +	BUG_ON(CRIT_LEVEL_A != tsk_mc_crit(ts));
26284 +	BUG_ON(lvl_a_id >= pid_table->num_pid_entries);
26285 +
26286 +	raw_spin_lock_irqsave(dom->lock, flags);
26287 +	pid = pid_table->entries[lvl_a_id].pid;
26288 +	BUG_ON(!pid);
26289 +	put_pid(pid);
26290 +	pid_table->entries[lvl_a_id].pid = NULL;
26291 +	if (ce_data->scheduled == ts)
26292 +		ce_data->scheduled = NULL;
26293 +	if (ce_data->should_schedule == ts)
26294 +		ce_data->should_schedule = NULL;
26295 +	raw_spin_unlock_irqrestore(dom->lock, flags);
26296 +}
26297 +
26298 +/***********************************************************
26299 + * Timer stuff
26300 + **********************************************************/
26301 +
26302 +/*
26303 + * Returns the next absolute time that the timer should fire.
26304 + */
26305 +lt_t mc_ce_timer_callback_common(struct domain *dom)
26306 +{
26307 +	/* relative and absolute times for cycles */
26308 +	lt_t now, offset_rel, cycle_start_abs, next_timer_abs;
26309 +	struct task_struct *should_schedule;
26310 +	struct ce_pid_table *pid_table;
26311 +	struct ce_pid_entry *pid_entry;
26312 +	struct ce_dom_data *ce_data;
26313 +	int idx, budget_overrun;
26314 +
26315 +	ce_data = dom->data;
26316 +	pid_table = get_pid_table(ce_data->cpu);
26317 +
26318 +	/* Based off of the current time, figure out the offset into the cycle
26319 +	 * and the cycle's start time, and determine what should be scheduled.
26320 +	 */
26321 +	now = litmus_clock();
26322 +	offset_rel = get_cycle_offset(now, pid_table->cycle_time);
26323 +	cycle_start_abs = now - offset_rel;
26324 +	idx = mc_ce_schedule_at(dom, offset_rel);
26325 +	pid_entry = get_pid_entry(ce_data->cpu, idx);
26326 +	next_timer_abs = cycle_start_abs + pid_entry->acc_time;
26327 +
26328 +	STRACE("timer: now: %llu  offset_rel: %llu  cycle_start_abs: %llu  "
26329 +			"next_timer_abs: %llu\n", now, offset_rel,
26330 +			cycle_start_abs, next_timer_abs);
26331 +
26332 +	/* get the task_struct (pid_task can accept a NULL) */
26333 +	rcu_read_lock();
26334 +	should_schedule = pid_task(pid_entry->pid, PIDTYPE_PID);
26335 +	rcu_read_unlock();
26336 +	ce_data->should_schedule = should_schedule;
26337 +
26338 +	if (should_schedule && 0 == atomic_read(&start_time_set)) {
26339 +		/*
26340 +		 * If jobs are not overrunning their budgets, then this
26341 +		 * should not happen.
26342 +		 */
26343 +		pid_entry->expected_job++;
26344 +		budget_overrun = pid_entry->expected_job !=
26345 +			tsk_rt(should_schedule)->job_params.job_no;
26346 +		if (budget_overrun)
26347 +			TRACE_MC_TASK(should_schedule,
26348 +				      "timer expected job number: %u "
26349 +				      "but current job: %u\n",
26350 +				      pid_entry->expected_job,
26351 +				      tsk_rt(should_schedule)->job_params.job_no);
26352 +	}
26353 +
26354 +	if (ce_data->should_schedule) {
26355 +		tsk_rt(should_schedule)->job_params.deadline =
26356 +			cycle_start_abs + pid_entry->acc_time;
26357 +		tsk_rt(should_schedule)->job_params.release =
26358 +			tsk_rt(should_schedule)->job_params.deadline -
26359 +			pid_entry->budget;
26360 +		tsk_rt(should_schedule)->job_params.exec_time = 0;
26361 +		sched_trace_task_release(should_schedule);
26362 +		set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
26363 +	}
26364 +	return next_timer_abs;
26365 +}
26366 +
26367 +/*
26368 + * What to do when a timer fires. The timer should only be armed if the number
26369 + * of PID entries is positive.
26370 + */
26371 +#ifdef CONFIG_MERGE_TIMERS
26372 +static void mc_ce_timer_callback(struct rt_event *e)
26373 +#else
26374 +static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
26375 +#endif
26376 +{
26377 +	struct ce_dom_data *ce_data;
26378 +	unsigned long flags;
26379 +	struct domain *dom;
26380 +	lt_t next_timer_abs;
26381 +#ifdef CONFIG_MERGE_TIMERS
26382 +	struct event_group *event_group;
26383 +	ce_data = container_of(e, struct ce_dom_data, event);
26384 +	/* use the same CPU the callback is executing on by passing NO_CPU */
26385 +	event_group = get_event_group_for(NO_CPU);
26386 +#else /* CONFIG_MERGE_TIMERS */
26387 +	ce_data = container_of(timer, struct ce_dom_data, timer);
26388 +#endif
26389 +	dom = get_domain_for(ce_data->cpu);
26390 +
26391 +	TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
26392 +
26393 +	raw_spin_lock_irqsave(dom->lock, flags);
26394 +	next_timer_abs = mc_ce_timer_callback_common(dom);
26395 +
26396 +	/* setup an event or timer for the next release in the CE schedule */
26397 +#ifdef CONFIG_MERGE_TIMERS
26398 +	add_event(event_group, e, next_timer_abs);
26399 +#else
26400 +	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
26401 +#endif
26402 +
26403 +	if (ce_data->scheduled != ce_data->should_schedule)
26404 +		preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
26405 +
26406 +	raw_spin_unlock_irqrestore(dom->lock, flags);
26407 +
26408 +#ifndef CONFIG_MERGE_TIMERS
26409 +	return HRTIMER_RESTART;
26410 +#endif
26411 +}
26412 +
26413 +/*
26414 + * Cancel timers on all CPUs. Returns 1 if any were active.
26415 + */
26416 +static int cancel_all_timers(void)
26417 +{
26418 +	struct ce_dom_data *ce_data;
26419 +	struct domain *dom;
26420 +	int cpu, ret = 0;
26421 +#ifndef CONFIG_MERGE_TIMERS
26422 +	int cancel_res;
26423 +#endif
26424 +
26425 +	TRACE("cancel all timers\n");
26426 +
26427 +	for_each_online_cpu(cpu) {
26428 +		dom = get_domain_for(cpu);
26429 +		ce_data = dom->data;
26430 +		ce_data->should_schedule = NULL;
26431 +#ifdef CONFIG_MERGE_TIMERS
26432 +		cancel_event(&ce_data->event);
26433 +#else
26434 +		cancel_res = hrtimer_cancel(&ce_data->timer);
26435 +		atomic_set(&ce_data->timer_info.state,
26436 +				HRTIMER_START_ON_INACTIVE);
26437 +		ret = ret || cancel_res;
26438 +#endif
26439 +	}
26440 +	return ret;
26441 +}
26442 +
26443 +/*
26444 + * Arm all timers so that they start at the new value of start time.
26445 + * Any CPU without CE PID entries won't have a timer armed.
26446 + * All timers should be canceled before calling this.
26447 + */
26448 +static void arm_all_timers(void)
26449 +{
26450 +	struct domain *dom;
26451 +	struct ce_dom_data *ce_data;
26452 +	struct ce_pid_table *pid_table;
26453 +	int cpu, idx, cpu_for_timer;
26454 +	const lt_t start = atomic64_read(&start_time);
26455 +
26456 +	TRACE("arm all timers\n");
26457 +
26458 +	for_each_online_cpu(cpu) {
26459 +		dom = get_domain_for(cpu);
26460 +		ce_data = dom->data;
26461 +		pid_table = get_pid_table(cpu);
26462 +		if (0 == pid_table->num_pid_entries)
26463 +			continue;
26464 +		for (idx = 0; idx < pid_table->num_pid_entries; idx++) {
26465 +			pid_table->entries[idx].expected_job = 0;
26466 +		}
26467 +#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
26468 +		cpu_for_timer = interrupt_cpu;
26469 +#else
26470 +		cpu_for_timer = cpu;
26471 +#endif
26472 +
26473 +#ifdef CONFIG_MERGE_TIMERS
26474 +		add_event(get_event_group_for(cpu_for_timer),
26475 +				&ce_data->event, start);
26476 +#else
26477 +		hrtimer_start_on(cpu_for_timer, &ce_data->timer_info,
26478 +				&ce_data->timer, ns_to_ktime(start),
26479 +				HRTIMER_MODE_ABS_PINNED);
26480 +#endif
26481 +	}
26482 +}
26483 +
26484 +/*
26485 + * There are no real releases in the CE, but the task release syscall will
26486 + * call this. We can re-set our notion of the CE period start to make
26487 + * the schedule look pretty.
26488 + */
26489 +void mc_ce_release_at_common(struct task_struct *ts, lt_t start)
26490 +{
26491 +	TRACE_TASK(ts, "release at\n");
26492 +	if (atomic_inc_and_test(&start_time_set)) {
26493 +		/* in this case, we won the race */
26494 +		cancel_all_timers();
26495 +		atomic64_set(&start_time, start);
26496 +		arm_all_timers();
26497 +	} else
26498 +		atomic_dec(&start_time_set);
26499 +}
26500 +
26501 +long mc_ce_activate_plugin_common(void)
26502 +{
26503 +	struct ce_dom_data *ce_data;
26504 +	struct domain *dom;
26505 +	long ret;
26506 +	int cpu;
26507 +
26508 +#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
26509 +	interrupt_cpu = atomic_read(&release_master_cpu);
26510 +	if (NO_CPU == interrupt_cpu) {
26511 +		printk(KERN_ERR "LITMUS: MC-CE needs a release master\n");
26512 +		ret = -EINVAL;
26513 +		goto out;
26514 +	}
26515 +#endif
26516 +
26517 +	for_each_online_cpu(cpu) {
26518 +		dom = get_domain_for(cpu);
26519 +		ce_data = dom->data;
26520 +		ce_data->scheduled = NULL;
26521 +		ce_data->should_schedule = NULL;
26522 +	}
26523 +
26524 +	atomic_set(&start_time_set, -1);
26525 +	atomic64_set(&start_time, litmus_clock());
26526 +	/* may not want to arm timers on activation, just after release */
26527 +	arm_all_timers();
26528 +	ret = 0;
26529 +out:
26530 +	return ret;
26531 +}
26532 +
26533 +static long mc_ce_activate_plugin(void)
26534 +{
26535 +	struct domain_data *our_domains[NR_CPUS];
26536 +	int cpu, n = 0;
26537 +	long ret;
26538 +
26539 +	for_each_online_cpu(cpu) {
26540 +		BUG_ON(NR_CPUS <= n);
26541 +		our_domains[cpu] = &per_cpu(_mc_ce_doms, cpu);
26542 +		n++;
26543 +	}
26544 +	ret = mc_ce_set_domains(n, our_domains);
26545 +	if (ret)
26546 +		goto out;
26547 +	ret = mc_ce_activate_plugin_common();
26548 +out:
26549 +	return ret;
26550 +}
26551 +
26552 +static void clear_pid_entries(void)
26553 +{
26554 +	struct ce_pid_table *pid_table = NULL;
26555 +	int cpu, entry;
26556 +
26557 +	for_each_online_cpu(cpu) {
26558 +		pid_table = get_pid_table(cpu);
26559 +		pid_table->num_pid_entries = 0;
26560 +		pid_table->cycle_time = 0;
26561 +		for (entry = 0; entry < CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS;
26562 +				++entry) {
26563 +			if (NULL != pid_table->entries[entry].pid) {
26564 +				put_pid(pid_table->entries[entry].pid);
26565 +				pid_table->entries[entry].pid = NULL;
26566 +			}
26567 +			pid_table->entries[entry].budget = 0;
26568 +			pid_table->entries[entry].acc_time = 0;
26569 +			pid_table->entries[entry].expected_job = 0;
26570 +		}
26571 +	}
26572 +}
26573 +
26574 +long mc_ce_deactivate_plugin_common(void)
26575 +{
26576 +	int cpu;
26577 +	cancel_all_timers();
26578 +	for_each_online_cpu(cpu) {
26579 +		per_cpu(domains, cpu) = NULL;
26580 +	}
26581 +	return 0;
26582 +}
26583 +
26584 +/*	Plugin object	*/
26585 +static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp = {
26586 +	.plugin_name		= "MC-CE",
26587 +	.admit_task		= mc_ce_admit_task,
26588 +	.task_new		= mc_ce_task_new,
26589 +	.complete_job		= complete_job,
26590 +	.release_at		= mc_ce_release_at_common,
26591 +	.task_exit		= mc_ce_task_exit_common,
26592 +	.schedule		= mc_ce_schedule,
26593 +	.finish_switch		= mc_ce_finish_switch,
26594 +	.task_wake_up		= mc_ce_task_wake_up,
26595 +	.task_block		= mc_ce_task_block,
26596 +	.activate_plugin	= mc_ce_activate_plugin,
26597 +	.deactivate_plugin	= mc_ce_deactivate_plugin_common,
26598 +};
26599 +
26600 +static int setup_proc(void);
26601 +static int __init init_sched_mc_ce(void)
26602 +{
26603 +	raw_spinlock_t *ce_lock;
26604 +	struct domain_data *dom_data;
26605 +	struct domain *dom;
26606 +	int cpu, err;
26607 +
26608 +	for_each_online_cpu(cpu) {
26609 +		per_cpu(domains, cpu) = NULL;
26610 +		ce_lock = &per_cpu(_mc_ce_dom_locks, cpu);
26611 +		raw_spin_lock_init(ce_lock);
26612 +		dom_data = &per_cpu(_mc_ce_doms, cpu);
26613 +		dom = &dom_data->domain;
26614 +		ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL, NULL,
26615 +				&per_cpu(_mc_ce_dom_data, cpu), cpu,
26616 +				mc_ce_timer_callback);
26617 +	}
26618 +	clear_pid_entries();
26619 +	err = setup_proc();
26620 +	if (!err)
26621 +		err = register_sched_plugin(&mc_ce_plugin);
26622 +	return err;
26623 +}
26624 +
26625 +#define BUF_SIZE PAGE_SIZE
26626 +static int write_into_proc(char *proc_buf, const int proc_size, char *fmt, ...)
26627 +{
26628 +	static char buf[BUF_SIZE];
26629 +	int n;
26630 +	va_list args;
26631 +
26632 +	/* When writing to procfs, we don't care about the trailing null that
26633 +	 * is not included in the count returned by vsnprintf.
26634 +	 */
26635 +	va_start(args, fmt);
26636 +	n = vsnprintf(buf, BUF_SIZE, fmt, args);
26637 +	va_end(args);
26638 +	if (BUF_SIZE <= n || proc_size <= n) {
26639 +		/* too big for formatting buffer or proc (less null byte) */
26640 +		n = -EINVAL;
26641 +		goto out;
26642 +	}
26643 +	memcpy(proc_buf, buf, n);
26644 +out:
26645 +	return n;
26646 +}
26647 +#undef BUF_SIZE
26648 +
26649 +/*
26650 + * Writes a PID entry to the procfs.
26651 + *
26652 + * @page buffer to write into.
26653 + * @count bytes available in the buffer
26654 + */
26655 +#define PID_SPACE 15
26656 +#define TASK_INFO_BUF (PID_SPACE + TASK_COMM_LEN)
26657 +static int write_pid_entry(char *page, const int count, const int cpu,
26658 +		const int task, struct ce_pid_entry *pid_entry)
26659 +{
26660 +	static char task_info[TASK_INFO_BUF];
26661 +	struct task_struct *ts;
26662 +	int n = 0, err, ti_n;
26663 +	char *ti_b;
26664 +
26665 +	if (pid_entry->pid) {
26666 +		rcu_read_lock();
26667 +		ts = pid_task(pid_entry->pid, PIDTYPE_PID);
26668 +		rcu_read_unlock();
26669 +
26670 +		/* get some information about the task */
26671 +		if (ts) {
26672 +			ti_b = task_info;
26673 +			ti_n = snprintf(ti_b, PID_SPACE, "%d", ts->pid);
26674 +			if (PID_SPACE <= ti_n)
26675 +				ti_n = PID_SPACE - 1;
26676 +			ti_b += ti_n;
26677 +			*ti_b = ' '; /* nuke the null byte */
26678 +			ti_b++;
26679 +			get_task_comm(ti_b, ts);
26680 +		} else {
26681 +			strncpy(task_info, "pid_task() failed :(",
26682 +					TASK_INFO_BUF);
26683 +		}
26684 +
26685 +	} else
26686 +		strncpy(task_info, "no", TASK_INFO_BUF);
26687 +	task_info[TASK_INFO_BUF - 1] = '\0'; /* just to be sure */
26688 +
26689 +	err = write_into_proc(page + n, count - n, "# task: %s\n", task_info);
26690 +	if (err < 0) {
26691 +		n = -ENOSPC;
26692 +		goto out;
26693 +	}
26694 +	n += err;
26695 +	err = write_into_proc(page + n, count - n, "%d, %d, %llu\n",
26696 +			cpu, task, pid_entry->budget);
26697 +	if (err < 0) {
26698 +		n = -ENOSPC;
26699 +		goto out;
26700 +	}
26701 +	n += err;
26702 +out:
26703 +	return n;
26704 +}
26705 +#undef PID_SPACE
26706 +#undef TASK_INFO_BUF
26707 +
26708 +/*
26709 + * Called when the user-land reads from proc.
26710 + */
26711 +static int proc_read_ce_file(char *page, char **start, off_t off, int count,
26712 +		int *eof, void *data)
26713 +{
26714 +	int n = 0, err, cpu, t;
26715 +	struct ce_pid_table *pid_table;
26716 +
26717 +	if (off > 0) {
26718 +		printk(KERN_INFO "litmus: MC-CE called read with off > 0\n");
26719 +		goto out;
26720 +	}
26721 +
26722 +	for_each_online_cpu(cpu) {
26723 +		pid_table = get_pid_table(cpu);
26724 +		for (t = 0; t < pid_table->num_pid_entries; ++t) {
26725 +			err = write_pid_entry(page + n, count - n,
26726 +					cpu, t, get_pid_entry(cpu, t));
26727 +			if (err < 0) {
26728 +				n = -ENOSPC;
26729 +				goto out;
26730 +			}
26731 +			n += err;
26732 +		}
26733 +	}
26734 +out:
26735 +	*eof = 1;
26736 +	return n;
26737 +}
26738 +
26739 +/*
26740 + * Skip a commented line.
26741 + */
26742 +static int skip_comment(const char *buf, const unsigned long max)
26743 +{
26744 +	unsigned long i = 0;
26745 +	const char *c = buf;
26746 +	if (0 == max || !c || *c != '#')
26747 +		return 0;
26748 +	++c; ++i;
26749 +	for (; i < max; ++i) {
26750 +		if (*c == '\n') {
26751 +			++c; ++i;
26752 +			break;
26753 +		}
26754 +		++c;
26755 +	}
26756 +	return i;
26757 +}
26758 +
26759 +/* a budget of 5 milliseconds is probably reasonable */
26760 +#define BUDGET_THRESHOLD 5000000ULL
26761 +static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
26762 +{
26763 +	struct ce_pid_table *pid_table = get_pid_table(cpu);
26764 +	struct ce_pid_entry *new_entry = NULL;
26765 +	int err = 0;
26766 +
26767 +	/* check the inputs */
26768 +	if (cpu < 0 || NR_CPUS <= cpu || task < 0 ||
26769 +			CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= task ||
26770 +			budget < 1) {
26771 +		printk(KERN_INFO "litmus: bad cpu, task ID, or budget sent to "
26772 +				"MC-CE proc\n");
26773 +		err = -EINVAL;
26774 +		goto out;
26775 +	}
26776 +	/* check for small budgets */
26777 +	if (BUDGET_THRESHOLD > budget) {
26778 +		printk(KERN_CRIT "litmus: you gave a small budget for an "
26779 +				"MC-CE task; that might be an issue.\n");
26780 +	}
26781 +	/* check that we have space for a new entry */
26782 +	if (CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= pid_table->num_pid_entries) {
26783 +		printk(KERN_INFO "litmus: too many MC-CE tasks for cpu "
26784 +				"%d\n", cpu);
26785 +		err = -EINVAL;
26786 +		goto out;
26787 +	}
26788 +	/* add the new entry */
26789 +	new_entry = get_pid_entry(cpu, pid_table->num_pid_entries);
26790 +	BUG_ON(NULL != new_entry->pid);
26791 +	new_entry->budget = budget;
26792 +	new_entry->acc_time = pid_table->cycle_time + budget;
26793 +	/* update the domain entry */
26794 +	pid_table->cycle_time += budget;
26795 +	pid_table->num_pid_entries++;
26796 +out:
26797 +	return err;
26798 +}
26799 +#undef BUDGET_THRESHOLD
26800 +
26801 +/*
26802 + * Called when the user-land writes to proc.
26803 + *
26804 + * Error checking is quite minimal. Format is:
26805 + * <cpu>, <process ID>, <budget>
26806 + */
26807 +#define PROCFS_MAX_SIZE PAGE_SIZE
26808 +static int proc_write_ce_file(struct file *file, const char __user *buffer,
26809 +		unsigned long count, void *data)
26810 +{
26811 +	static char kbuf[PROCFS_MAX_SIZE];
26812 +	char *c = kbuf, *c_skipped;
26813 +	int cpu, task, cnt = 0, chars_read, converted, err;
26814 +	lt_t budget;
26815 +
26816 +	if (!using_linux_plugin()) {
26817 +		printk(KERN_INFO "litmus: can only edit MC-CE proc under Linux "
26818 +				"plugin\n");
26819 +		cnt = -EINVAL;
26820 +		goto out;
26821 +	}
26822 +
26823 +	if (count > PROCFS_MAX_SIZE) {
26824 +		printk(KERN_INFO "litmus: MC-CE procfs got too many bytes "
26825 +				"from user-space.\n");
26826 +		cnt = -EINVAL;
26827 +		goto out;
26828 +	}
26829 +
26830 +	if (copy_from_user(kbuf, buffer, count)) {
26831 +		printk(KERN_INFO "litmus: couldn't copy from user %s\n",
26832 +				__FUNCTION__);
26833 +		cnt = -EFAULT;
26834 +		goto out;
26835 +	}
26836 +	clear_pid_entries();
26837 +	while (cnt < count) {
26838 +		c_skipped = skip_spaces(c);
26839 +		if (c_skipped != c) {
26840 +			chars_read = c_skipped - c;
26841 +			cnt += chars_read;
26842 +			c += chars_read;
26843 +			continue;
26844 +		}
26845 +		if (*c == '#') {
26846 +			chars_read = skip_comment(c, count - cnt);
26847 +			cnt += chars_read;
26848 +			c += chars_read;
26849 +			continue;
26850 +		}
26851 +		converted = sscanf(c, "%d, %d, %llu%n", &cpu, &task, &budget,
26852 +				&chars_read);
26853 +		if (3 != converted) {
26854 +			printk(KERN_INFO "litmus: MC-CE procfs expected three "
26855 +					"arguments, but got %d.\n", converted);
26856 +			cnt = -EINVAL;
26857 +			goto out;
26858 +		}
26859 +		cnt += chars_read;
26860 +		c += chars_read;
26861 +		err = setup_pid_entry(cpu, task, budget);
26862 +		if (err) {
26863 +			cnt = -EINVAL;
26864 +			goto out;
26865 +		}
26866 +	}
26867 +out:
26868 +	return cnt;
26869 +}
26870 +#undef PROCFS_MAX_SIZE
26871 +
26872 +#define CE_FILE_PROC_NAME "ce_file"
26873 +static void tear_down_proc(void)
26874 +{
26875 +	if (ce_file)
26876 +		remove_proc_entry(CE_FILE_PROC_NAME, mc_ce_dir);
26877 +	if (mc_ce_dir)
26878 +		remove_plugin_proc_dir(&mc_ce_plugin);
26879 +}
26880 +
26881 +static int setup_proc(void)
26882 +{
26883 +	int err;
26884 +	err = make_plugin_proc_dir(&mc_ce_plugin, &mc_ce_dir);
26885 +	if (err) {
26886 +		printk(KERN_ERR "could not create MC-CE procfs dir.\n");
26887 +		goto out;
26888 +	}
26889 +	ce_file = create_proc_entry(CE_FILE_PROC_NAME, 0644, mc_ce_dir);
26890 +	if (!ce_file) {
26891 +		printk(KERN_ERR "could not create MC-CE procfs file.\n");
26892 +		err = -EIO;
26893 +		goto out_remove_proc;
26894 +	}
26895 +	ce_file->read_proc = proc_read_ce_file;
26896 +	ce_file->write_proc = proc_write_ce_file;
26897 +	goto out;
26898 +out_remove_proc:
26899 +	tear_down_proc();
26900 +out:
26901 +	return err;
26902 +}
26903 +#undef CE_FILE_PROC_NAME
26904 +
26905 +static void clean_sched_mc_ce(void)
26906 +{
26907 +	tear_down_proc();
26908 +}
26909 +
26910 +module_init(init_sched_mc_ce);
26911 +module_exit(clean_sched_mc_ce);
26912 diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
26913 index c7d5cf7..0a64273 100644
26914 --- a/litmus/sched_pfair.c
26915 +++ b/litmus/sched_pfair.c
26916 @@ -23,6 +23,13 @@
26917  
26918  #include <litmus/bheap.h>
26919  
26920 +/* to configure the cluster size */
26921 +#include <litmus/litmus_proc.h>
26922 +
26923 +#include <litmus/clustered.h>
26924 +
26925 +static enum cache_level pfair_cluster_level = GLOBAL_CLUSTER;
26926 +
26927  struct subtask {
26928  	/* measured in quanta relative to job release */
26929  	quanta_t release;
26930 @@ -41,7 +48,7 @@ struct pfair_param   {
26931  	quanta_t	last_quantum; /* when scheduled last */
26932  	int		last_cpu;     /* where scheduled last */
26933  
26934 -	unsigned int	sporadic_release; /* On wakeup, new sporadic release? */
26935 +	struct pfair_cluster* cluster; /* where this task is scheduled */
26936  
26937  	struct subtask subtasks[0];   /* allocate together with pfair_param */
26938  };
26939 @@ -49,19 +56,20 @@ struct pfair_param   {
26940  #define tsk_pfair(tsk) ((tsk)->rt_param.pfair)
26941  
26942  struct pfair_state {
26943 -	int cpu;
26944 +	struct cluster_cpu topology;
26945 +
26946  	volatile quanta_t cur_tick;    /* updated by the CPU that is advancing
26947  				        * the time */
26948  	volatile quanta_t local_tick;  /* What tick is the local CPU currently
26949  				        * executing? Updated only by the local
26950  				        * CPU. In QEMU, this may lag behind the
26951 -					* current tick. In a real system, with
26952 -					* proper timers and aligned quanta,
26953 -					* that should only be the
26954 -					* case for a very short time after the
26955 -					* time advanced. With staggered quanta,
26956 -					* it will lag for the duration of the
26957 -					* offset.
26958 +				        * current tick. In a real system, with
26959 +				        * proper timers and aligned quanta,
26960 +				        * that should only be the case for a
26961 +				        * very short time after the time
26962 +				        * advanced. With staggered quanta, it
26963 +				        * will lag for the duration of the
26964 +				        * offset.
26965  					*/
26966  
26967  	struct task_struct* linked;    /* the task that should be executing */
26968 @@ -79,25 +87,56 @@ struct pfair_state {
26969   */
26970  #define PFAIR_MAX_PERIOD 2000
26971  
26972 -/* This is the release queue wheel. It is indexed by pfair_time %
26973 - * PFAIR_MAX_PERIOD.  Each heap is ordered by PFAIR priority, so that it can be
26974 - * merged with the ready queue.
26975 - */
26976 -static struct bheap release_queue[PFAIR_MAX_PERIOD];
26977 +struct pfair_cluster {
26978 +	struct scheduling_cluster topology;
26979  
26980 -DEFINE_PER_CPU(struct pfair_state, pfair_state);
26981 -struct pfair_state* *pstate; /* short cut */
26982 +	/* The "global" time in this cluster. */
26983 +	quanta_t pfair_time; /* the "official" PFAIR clock */
26984 +	quanta_t merge_time; /* Updated after the release queue has been
26985 +			      * merged. Used by drop_all_references().
26986 +			      */
26987  
26988 -static quanta_t pfair_time = 0; /* the "official" PFAIR clock */
26989 -static quanta_t merge_time = 0; /* Updated after the release queue has been
26990 -				 * merged. Used by drop_all_references().
26991 -				 */
26992 +	/* The ready queue for this cluster. */
26993 +	rt_domain_t pfair;
26994  
26995 -static rt_domain_t pfair;
26996 +	/* This is the release queue wheel for this cluster. It is indexed by
26997 +	 * pfair_time % PFAIR_MAX_PERIOD.  Each heap is ordered by PFAIR
26998 +	 * priority, so that it can be merged with the ready queue.
26999 +	 */
27000 +	struct bheap release_queue[PFAIR_MAX_PERIOD];
27001 +};
27002  
27003 -/* The pfair_lock is used to serialize all scheduling events.
27004 - */
27005 -#define pfair_lock pfair.ready_lock
27006 +static inline struct pfair_cluster* cpu_cluster(struct pfair_state* state)
27007 +{
27008 +	return container_of(state->topology.cluster, struct pfair_cluster, topology);
27009 +}
27010 +
27011 +static inline int cpu_id(struct pfair_state* state)
27012 +{
27013 +	return state->topology.id;
27014 +}
27015 +
27016 +static inline struct pfair_state* from_cluster_list(struct list_head* pos)
27017 +{
27018 +	return list_entry(pos, struct pfair_state, topology.cluster_list);
27019 +}
27020 +
27021 +static inline raw_spinlock_t* cluster_lock(struct pfair_cluster* cluster)
27022 +{
27023 +	/* The ready_lock is used to serialize all scheduling events. */
27024 +	return &cluster->pfair.ready_lock;
27025 +}
27026 +
27027 +static inline raw_spinlock_t* cpu_lock(struct pfair_state* state)
27028 +{
27029 +	return cluster_lock(cpu_cluster(state));
27030 +}
27031 +
27032 +DEFINE_PER_CPU(struct pfair_state, pfair_state);
27033 +struct pfair_state* *pstate; /* short cut */
27034 +
27035 +static struct pfair_cluster* pfair_clusters;
27036 +static int num_pfair_clusters;
27037  
27038  /* Enable for lots of trace info.
27039   * #define PFAIR_DEBUG
27040 @@ -197,9 +236,9 @@ int pfair_ready_order(struct bheap_node* a, struct bheap_node* b)
27041  }
27042  
27043  /* return the proper release queue for time t */
27044 -static struct bheap* relq(quanta_t t)
27045 +static struct bheap* relq(struct pfair_cluster* cluster, quanta_t t)
27046  {
27047 -	struct bheap* rq = &release_queue[t % PFAIR_MAX_PERIOD];
27048 +	struct bheap* rq = cluster->release_queue + (t % PFAIR_MAX_PERIOD);
27049  	return rq;
27050  }
27051  
27052 @@ -215,17 +254,19 @@ static void __pfair_add_release(struct task_struct* t, struct bheap* queue)
27053  		    tsk_rt(t)->heap_node);
27054  }
27055  
27056 -static void pfair_add_release(struct task_struct* t)
27057 +static void pfair_add_release(struct pfair_cluster* cluster,
27058 +			      struct task_struct* t)
27059  {
27060  	BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
27061 -	__pfair_add_release(t, relq(cur_release(t)));
27062 +	__pfair_add_release(t, relq(cluster, cur_release(t)));
27063  }
27064  
27065  /* pull released tasks from the release queue */
27066 -static void poll_releases(quanta_t time)
27067 +static void poll_releases(struct pfair_cluster* cluster,
27068 +			  quanta_t time)
27069  {
27070 -	__merge_ready(&pfair, relq(time));
27071 -	merge_time = time;
27072 +	__merge_ready(&cluster->pfair, relq(cluster, time));
27073 +	cluster->merge_time = time;
27074  }
27075  
27076  static void check_preempt(struct task_struct* t)
27077 @@ -246,18 +287,20 @@ static void check_preempt(struct task_struct* t)
27078  	}
27079  }
27080  
27081 -/* caller must hold pfair_lock */
27082 +/* caller must hold pfair.ready_lock */
27083  static void drop_all_references(struct task_struct *t)
27084  {
27085          int cpu;
27086          struct pfair_state* s;
27087          struct bheap* q;
27088 +	struct pfair_cluster* cluster;
27089          if (bheap_node_in_heap(tsk_rt(t)->heap_node)) {
27090                  /* figure out what queue the node is in */
27091 -                if (time_before_eq(cur_release(t), merge_time))
27092 -                        q = &pfair.ready_queue;
27093 +		cluster = tsk_pfair(t)->cluster;
27094 +                if (time_before_eq(cur_release(t), cluster->merge_time))
27095 +                        q = &cluster->pfair.ready_queue;
27096                  else
27097 -                        q = relq(cur_release(t));
27098 +                        q = relq(cluster, cur_release(t));
27099                  bheap_delete(pfair_ready_order, q,
27100                              tsk_rt(t)->heap_node);
27101          }
27102 @@ -289,7 +332,6 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
27103  		} else {
27104  			/* remove task from system until it wakes */
27105  			drop_all_references(t);
27106 -			tsk_pfair(t)->sporadic_release = 1;
27107  			TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n",
27108  				   cpu, p->cur);
27109  			return 0;
27110 @@ -301,22 +343,25 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
27111  	return to_relq;
27112  }
27113  
27114 -static void advance_subtasks(quanta_t time)
27115 +static void advance_subtasks(struct pfair_cluster *cluster, quanta_t time)
27116  {
27117 -	int cpu, missed;
27118 +	int missed;
27119  	struct task_struct* l;
27120  	struct pfair_param* p;
27121 +	struct list_head* pos;
27122 +	struct pfair_state* cpu;
27123  
27124 -	for_each_online_cpu(cpu) {
27125 -		l = pstate[cpu]->linked;
27126 -		missed = pstate[cpu]->linked != pstate[cpu]->local;
27127 +	list_for_each(pos, &cluster->topology.cpus) {
27128 +		cpu = from_cluster_list(pos);
27129 +		l = cpu->linked;
27130 +		missed = cpu->linked != cpu->local;
27131  		if (l) {
27132  			p = tsk_pfair(l);
27133  			p->last_quantum = time;
27134 -			p->last_cpu     =  cpu;
27135 -			if (advance_subtask(time, l, cpu)) {
27136 -				pstate[cpu]->linked = NULL;
27137 -				pfair_add_release(l);
27138 +			p->last_cpu     =  cpu_id(cpu);
27139 +			if (advance_subtask(time, l, cpu_id(cpu))) {
27140 +				cpu->linked = NULL;
27141 +				pfair_add_release(cluster, l);
27142  			}
27143  		}
27144  	}
27145 @@ -350,8 +395,10 @@ static int pfair_link(quanta_t time, int cpu,
27146  	int target = target_cpu(time, t, cpu);
27147  	struct task_struct* prev  = pstate[cpu]->linked;
27148  	struct task_struct* other;
27149 +	struct pfair_cluster* cluster = cpu_cluster(pstate[cpu]);
27150  
27151  	if (target != cpu) {
27152 +		BUG_ON(pstate[target]->topology.cluster != pstate[cpu]->topology.cluster);
27153  		other = pstate[target]->linked;
27154  		pstate[target]->linked = t;
27155  		tsk_rt(t)->linked_on   = target;
27156 @@ -365,14 +412,14 @@ static int pfair_link(quanta_t time, int cpu,
27157  			if (prev) {
27158  				/* prev got pushed back into the ready queue */
27159  				tsk_rt(prev)->linked_on = NO_CPU;
27160 -				__add_ready(&pfair, prev);
27161 +				__add_ready(&cluster->pfair, prev);
27162  			}
27163  			/* we are done with this cpu */
27164  			return 0;
27165  		} else {
27166  			/* re-add other, it's original CPU was not considered yet */
27167  			tsk_rt(other)->linked_on = NO_CPU;
27168 -			__add_ready(&pfair, other);
27169 +			__add_ready(&cluster->pfair, other);
27170  			/* reschedule this CPU */
27171  			return 1;
27172  		}
27173 @@ -382,71 +429,77 @@ static int pfair_link(quanta_t time, int cpu,
27174  		if (prev) {
27175  			/* prev got pushed back into the ready queue */
27176  			tsk_rt(prev)->linked_on = NO_CPU;
27177 -			__add_ready(&pfair, prev);
27178 +			__add_ready(&cluster->pfair, prev);
27179  		}
27180  		/* we are done with this CPU */
27181  		return 0;
27182  	}
27183  }
27184  
27185 -static void schedule_subtasks(quanta_t time)
27186 +static void schedule_subtasks(struct pfair_cluster *cluster, quanta_t time)
27187  {
27188 -	int cpu, retry;
27189 +	int retry;
27190 +	struct list_head *pos;
27191 +	struct pfair_state *cpu_state;
27192  
27193 -	for_each_online_cpu(cpu) {
27194 +	list_for_each(pos, &cluster->topology.cpus) {
27195 +		cpu_state = from_cluster_list(pos);
27196  		retry = 1;
27197  		while (retry) {
27198 -			if (pfair_higher_prio(__peek_ready(&pfair),
27199 -					      pstate[cpu]->linked))
27200 -				retry = pfair_link(time, cpu,
27201 -						   __take_ready(&pfair));
27202 +			if (pfair_higher_prio(__peek_ready(&cluster->pfair),
27203 +					      cpu_state->linked))
27204 +				retry = pfair_link(time, cpu_id(cpu_state),
27205 +						   __take_ready(&cluster->pfair));
27206  			else
27207  				retry = 0;
27208  		}
27209  	}
27210  }
27211  
27212 -static void schedule_next_quantum(quanta_t time)
27213 +static void schedule_next_quantum(struct pfair_cluster *cluster, quanta_t time)
27214  {
27215 -	int cpu;
27216 +	struct pfair_state *cpu;
27217 +	struct list_head* pos;
27218  
27219  	/* called with interrupts disabled */
27220  	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
27221  	       time, litmus_clock());
27222 -	raw_spin_lock(&pfair_lock);
27223 +	raw_spin_lock(cluster_lock(cluster));
27224  	PTRACE("<<< Q %lu at %llu\n",
27225  	       time, litmus_clock());
27226  
27227  	sched_trace_quantum_boundary();
27228  
27229 -	advance_subtasks(time);
27230 -	poll_releases(time);
27231 -	schedule_subtasks(time);
27232 +	advance_subtasks(cluster, time);
27233 +	poll_releases(cluster, time);
27234 +	schedule_subtasks(cluster, time);
27235  
27236 -	for (cpu = 0; cpu < num_online_cpus(); cpu++)
27237 -		if (pstate[cpu]->linked)
27238 +	list_for_each(pos, &cluster->topology.cpus) {
27239 +		cpu = from_cluster_list(pos);
27240 +		if (cpu->linked)
27241  			PTRACE_TASK(pstate[cpu]->linked,
27242 -				    " linked on %d.\n", cpu);
27243 +				    " linked on %d.\n", cpu_id(cpu));
27244  		else
27245 -			PTRACE("(null) linked on %d.\n", cpu);
27246 -
27247 +			PTRACE("(null) linked on %d.\n", cpu_id(cpu));
27248 +	}
27249  	/* We are done. Advance time. */
27250  	mb();
27251 -	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
27252 -		if (pstate[cpu]->local_tick != pstate[cpu]->cur_tick) {
27253 +	list_for_each(pos, &cluster->topology.cpus) {
27254 +		cpu = from_cluster_list(pos);
27255 +		if (cpu->local_tick != cpu->cur_tick) {
27256  			TRACE("BAD Quantum not acked on %d "
27257  			      "(l:%lu c:%lu p:%lu)\n",
27258 -			      cpu,
27259 -			      pstate[cpu]->local_tick,
27260 -			      pstate[cpu]->cur_tick,
27261 -			      pfair_time);
27262 -			pstate[cpu]->missed_quanta++;
27263 +			      cpu_id(cpu),
27264 +			      cpu->local_tick,
27265 +			      cpu->cur_tick,
27266 +			      cluster->pfair_time);
27267 +			cpu->missed_quanta++;
27268  		}
27269 -		pstate[cpu]->cur_tick = time;
27270 +		cpu->cur_tick = time;
27271  	}
27272  	PTRACE(">>> Q %lu at %llu\n",
27273  	       time, litmus_clock());
27274 -	raw_spin_unlock(&pfair_lock);
27275 +	raw_spin_unlock(cluster_lock(cluster));
27276  }
27277  
27278  static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
27279 @@ -479,12 +532,12 @@ static void catchup_quanta(quanta_t from, quanta_t target,
27280  	while (time_before(cur, target)) {
27281  		wait_for_quantum(cur, state);
27282  		cur++;
27283 -		time = cmpxchg(&pfair_time,
27284 +		time = cmpxchg(&cpu_cluster(state)->pfair_time,
27285  			       cur - 1,   /* expected */
27286  			       cur        /* next     */
27287  			);
27288  		if (time == cur - 1)
27289 -			schedule_next_quantum(cur);
27290 +			schedule_next_quantum(cpu_cluster(state), cur);
27291  	}
27292  	TRACE("+++> catching up done\n");
27293  }
27294 @@ -505,14 +558,14 @@ static void pfair_tick(struct task_struct* t)
27295  		/* Attempt to advance time. First CPU to get here
27296  		 * will prepare the next quantum.
27297  		 */
27298 -		time = cmpxchg(&pfair_time,
27299 +		time = cmpxchg(&cpu_cluster(state)->pfair_time,
27300  			       cur - 1,   /* expected */
27301  			       cur        /* next     */
27302  			);
27303  		if (time == cur - 1) {
27304  			/* exchange succeeded */
27305  			wait_for_quantum(cur - 1, state);
27306 -			schedule_next_quantum(cur);
27307 +			schedule_next_quantum(cpu_cluster(state), cur);
27308  			retry = 0;
27309  		} else if (time_before(time, cur - 1)) {
27310  			/* the whole system missed a tick !? */
27311 @@ -562,80 +615,82 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
27312  	int blocks;
27313  	struct task_struct* next = NULL;
27314  
27315 -	raw_spin_lock(&pfair_lock);
27316 +	raw_spin_lock(cpu_lock(state));
27317  
27318  	blocks  = is_realtime(prev) && !is_running(prev);
27319  
27320 -	if (state->local && safe_to_schedule(state->local, state->cpu))
27321 +	if (state->local && safe_to_schedule(state->local, cpu_id(state)))
27322  		next = state->local;
27323  
27324  	if (prev != next) {
27325  		tsk_rt(prev)->scheduled_on = NO_CPU;
27326  		if (next)
27327 -			tsk_rt(next)->scheduled_on = state->cpu;
27328 +			tsk_rt(next)->scheduled_on = cpu_id(state);
27329  	}
27330  	sched_state_task_picked();
27331 -	raw_spin_unlock(&pfair_lock);
27332 +	raw_spin_unlock(cpu_lock(state));
27333  
27334  	if (next)
27335  		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
27336 -			   tsk_pfair(next)->release, pfair_time, litmus_clock());
27337 +			   tsk_pfair(next)->release, cpu_cluster(state)->pfair_time, litmus_clock());
27338  	else if (is_realtime(prev))
27339 -		TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock());
27340 +		TRACE("Becomes idle at %lu (%llu)\n", cpu_cluster(state)->pfair_time, litmus_clock());
27341  
27342  	return next;
27343  }
27344  
27345  static void pfair_task_new(struct task_struct * t, int on_rq, int running)
27346  {
27347 -	unsigned long 		flags;
27348 +	unsigned long flags;
27349 +	struct pfair_cluster* cluster;
27350  
27351  	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
27352  
27353 -	raw_spin_lock_irqsave(&pfair_lock, flags);
27354 +	cluster = tsk_pfair(t)->cluster;
27355 +
27356 +	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
27357  	if (running)
27358  		t->rt_param.scheduled_on = task_cpu(t);
27359  	else
27360  		t->rt_param.scheduled_on = NO_CPU;
27361  
27362 -	prepare_release(t, pfair_time + 1);
27363 -	tsk_pfair(t)->sporadic_release = 0;
27364 -	pfair_add_release(t);
27365 +	prepare_release(t, cluster->pfair_time + 1);
27366 +	pfair_add_release(cluster, t);
27367  	check_preempt(t);
27368  
27369 -	raw_spin_unlock_irqrestore(&pfair_lock, flags);
27370 +	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
27371  }
27372  
27373  static void pfair_task_wake_up(struct task_struct *t)
27374  {
27375  	unsigned long flags;
27376  	lt_t now;
27377 +	struct pfair_cluster* cluster;
27378 +
27379 +	cluster = tsk_pfair(t)->cluster;
27380  
27381  	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
27382 -		   litmus_clock(), cur_release(t), pfair_time);
27383 +		   litmus_clock(), cur_release(t), cluster->pfair_time);
27384  
27385 -	raw_spin_lock_irqsave(&pfair_lock, flags);
27386 +	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
27387  
27388 -	/* It is a little unclear how to deal with Pfair
27389 -	 * tasks that block for a while and then wake. For now,
27390 -	 * if a task blocks and wakes before its next job release,
27391 +	/* If a task blocks and wakes before its next job release,
27392  	 * then it may resume if it is currently linked somewhere
27393  	 * (as if it never blocked at all). Otherwise, we have a
27394  	 * new sporadic job release.
27395  	 */
27396 -	if (tsk_pfair(t)->sporadic_release) {
27397 -		now = litmus_clock();
27398 +	now = litmus_clock();
27399 +	if (lt_before(get_deadline(t), now)) {
27400  		release_at(t, now);
27401  		prepare_release(t, time2quanta(now, CEIL));
27402  		sched_trace_task_release(t);
27403  		/* FIXME: race with pfair_time advancing */
27404 -		pfair_add_release(t);
27405 -		tsk_pfair(t)->sporadic_release = 0;
27406 +		pfair_add_release(cluster, t);
27407  	}
27408  
27409  	check_preempt(t);
27410  
27411 -	raw_spin_unlock_irqrestore(&pfair_lock, flags);
27412 +	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
27413  	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
27414  }
27415  
27416 @@ -649,9 +704,12 @@ static void pfair_task_block(struct task_struct *t)
27417  static void pfair_task_exit(struct task_struct * t)
27418  {
27419  	unsigned long flags;
27420 +	struct pfair_cluster *cluster;
27421  
27422  	BUG_ON(!is_realtime(t));
27423  
27424 +	cluster = tsk_pfair(t)->cluster;
27425 +
27426  	/* Remote task from release or ready queue, and ensure
27427  	 * that it is not the scheduled task for ANY CPU. We
27428  	 * do this blanket check because occassionally when
27429 @@ -659,12 +717,12 @@ static void pfair_task_exit(struct task_struct * t)
27430  	 * might not be the same as the CPU that the PFAIR scheduler
27431  	 * has chosen for it.
27432  	 */
27433 -	raw_spin_lock_irqsave(&pfair_lock, flags);
27434 +	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
27435  
27436  	TRACE_TASK(t, "RIP, state:%d\n", t->state);
27437  	drop_all_references(t);
27438  
27439 -	raw_spin_unlock_irqrestore(&pfair_lock, flags);
27440 +	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
27441  
27442  	kfree(t->rt_param.pfair);
27443  	t->rt_param.pfair = NULL;
27444 @@ -676,27 +734,27 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
27445  	unsigned long flags;
27446  	quanta_t release;
27447  
27448 +	struct pfair_cluster *cluster;
27449 +
27450 +	cluster = tsk_pfair(task)->cluster;
27451 +
27452  	BUG_ON(!is_realtime(task));
27453  
27454 -	raw_spin_lock_irqsave(&pfair_lock, flags);
27455 +	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
27456  	release_at(task, start);
27457  	release = time2quanta(start, CEIL);
27458  
27459 -	if (release - pfair_time >= PFAIR_MAX_PERIOD)
27460 -		release = pfair_time + PFAIR_MAX_PERIOD;
27461 +	/* FIXME: support arbitrary offsets. */
27462 +	if (release - cluster->pfair_time >= PFAIR_MAX_PERIOD)
27463 +		release = cluster->pfair_time + PFAIR_MAX_PERIOD;
27464  
27465  	TRACE_TASK(task, "sys release at %lu\n", release);
27466  
27467  	drop_all_references(task);
27468  	prepare_release(task, release);
27469 -	pfair_add_release(task);
27470 -
27471 -	/* Clear sporadic release flag, since this release subsumes any
27472 -	 * sporadic release on wake.
27473 -	 */
27474 -	tsk_pfair(task)->sporadic_release = 0;
27475 +	pfair_add_release(cluster, task);
27476  
27477 -	raw_spin_unlock_irqrestore(&pfair_lock, flags);
27478 +	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
27479  }
27480  
27481  static void init_subtask(struct subtask* sub, unsigned long i,
27482 @@ -755,6 +813,11 @@ static long pfair_admit_task(struct task_struct* t)
27483  	struct pfair_param* param;
27484  	unsigned long i;
27485  
27486 +	/* first check that the task is in the right cluster */
27487 +	if (cpu_cluster(pstate[tsk_rt(t)->task_params.cpu]) !=
27488 +	    cpu_cluster(pstate[task_cpu(t)]))
27489 +		return -EINVAL;
27490 +
27491  	/* Pfair is a tick-based method, so the time
27492  	 * of interest is jiffies. Calculate tick-based
27493  	 * times for everything.
27494 @@ -798,6 +861,8 @@ static long pfair_admit_task(struct task_struct* t)
27495  	param->release = 0;
27496  	param->period  = period;
27497  
27498 +	param->cluster = cpu_cluster(pstate[tsk_rt(t)->task_params.cpu]);
27499 +
27500  	for (i = 0; i < quanta; i++)
27501  		init_subtask(param->subtasks + i, i, quanta, period);
27502  
27503 @@ -813,24 +878,88 @@ static long pfair_admit_task(struct task_struct* t)
27504  	return 0;
27505  }
27506  
27507 +static void pfair_init_cluster(struct pfair_cluster* cluster)
27508 +{
27509 +	int i;
27510 +
27511 +	/* initialize release queue */
27512 +	for (i = 0; i < PFAIR_MAX_PERIOD; i++)
27513 +		bheap_init(&cluster->release_queue[i]);
27514 +	rt_domain_init(&cluster->pfair, pfair_ready_order, NULL, NULL);
27515 +	INIT_LIST_HEAD(&cluster->topology.cpus);
27516 +}
27517 +
27518 +static void cleanup_clusters(void)
27519 +{
27520 +	int i;
27521 +
27522 +	if (num_pfair_clusters)
27523 +		kfree(pfair_clusters);
27524 +	pfair_clusters = NULL;
27525 +	num_pfair_clusters = 0;
27526 +
27527 +	/* avoid stale pointers */
27528 +	for (i = 0; i < NR_CPUS; i++)
27529 +		pstate[i]->topology.cluster = NULL;
27530 +}
27531 +
27532  static long pfair_activate_plugin(void)
27533  {
27534 -	int cpu;
27535 +	int err, i;
27536  	struct pfair_state* state;
27537 +	struct pfair_cluster* cluster ;
27538 +	quanta_t now;
27539 +	int cluster_size;
27540 +	struct cluster_cpu* cpus[NR_CPUS];
27541 +	struct scheduling_cluster* clust[NR_CPUS];
27542  
27543 -	state = &__get_cpu_var(pfair_state);
27544 -	pfair_time = current_quantum(state);
27545 +	cluster_size = get_cluster_size(pfair_cluster_level);
27546  
27547 -	TRACE("Activating PFAIR at q=%lu\n", pfair_time);
27548 +	if (cluster_size <= 0 || num_online_cpus() % cluster_size != 0)
27549 +		return -EINVAL;
27550  
27551 -	for (cpu = 0; cpu < num_online_cpus(); cpu++)  {
27552 -		state = &per_cpu(pfair_state, cpu);
27553 -		state->cur_tick   = pfair_time;
27554 -		state->local_tick = pfair_time;
27555 +	num_pfair_clusters = num_online_cpus() / cluster_size;
27556 +
27557 +	pfair_clusters = kzalloc(num_pfair_clusters * sizeof(struct pfair_cluster), GFP_ATOMIC);
27558 +	if (!pfair_clusters) {
27559 +		num_pfair_clusters = 0;
27560 +		printk(KERN_ERR "Could not allocate Pfair clusters!\n");
27561 +		return -ENOMEM;
27562 +	}
27563 +
27564 +	state = &__get_cpu_var(pfair_state);
27565 +	now = current_quantum(state);
27566 +	TRACE("Activating PFAIR at q=%lu\n", now);
27567 +
27568 +	for (i = 0; i < num_pfair_clusters; i++) {
27569 +		cluster = &pfair_clusters[i];
27570 +		pfair_init_cluster(cluster);
27571 +		cluster->pfair_time = now;
27572 +		clust[i] = &cluster->topology;
27573 +	}
27574 +
27575 +	for (i = 0; i < num_online_cpus(); i++)  {
27576 +		state = &per_cpu(pfair_state, i);
27577 +		state->cur_tick   = now;
27578 +		state->local_tick = now;
27579  		state->missed_quanta = 0;
27580 -		state->offset     = cpu_stagger_offset(cpu);
27581 +		state->offset     = cpu_stagger_offset(i);
27582 +		printk(KERN_ERR "cpus[%d] set; %d\n", i, num_online_cpus());
27583 +		cpus[i] = &state->topology;
27584  	}
27585  
27586 +	err = assign_cpus_to_clusters(pfair_cluster_level, clust, num_pfair_clusters,
27587 +				      cpus, num_online_cpus());
27588 +
27589 +	if (err < 0)
27590 +		cleanup_clusters();
27591 +
27592 +	return err;
27593 +}
27594 +
27595 +static long pfair_deactivate_plugin(void)
27596 +{
27597 +	cleanup_clusters();
27598  	return 0;
27599  }
27600  
27601 @@ -847,30 +976,29 @@ static struct sched_plugin pfair_plugin __cacheline_aligned_in_smp = {
27602  	.release_at		= pfair_release_at,
27603  	.complete_job		= complete_job,
27604  	.activate_plugin	= pfair_activate_plugin,
27605 +	.deactivate_plugin	= pfair_deactivate_plugin,
27606  };
27607  
27608 +
27609 +static struct proc_dir_entry *cluster_file = NULL, *pfair_dir = NULL;
27610 +
27611  static int __init init_pfair(void)
27612  {
27613 -	int cpu, i;
27614 +	int cpu, err, fs;
27615  	struct pfair_state *state;
27616  
27617 -
27618  	/*
27619  	 * initialize short_cut for per-cpu pfair state;
27620  	 * there may be a problem here if someone removes a cpu
27621  	 * while we are doing this initialization... and if cpus
27622 -	 * are added / removed later... is it a _real_ problem?
27623 +	 * are added / removed later... but we don't support CPU hotplug atm anyway.
27624  	 */
27625  	pstate = kmalloc(sizeof(struct pfair_state*) * num_online_cpus(), GFP_KERNEL);
27626  
27627 -	/* initialize release queue */
27628 -	for (i = 0; i < PFAIR_MAX_PERIOD; i++)
27629 -		bheap_init(&release_queue[i]);
27630 -
27631  	/* initialize CPU state */
27632  	for (cpu = 0; cpu < num_online_cpus(); cpu++)  {
27633  		state = &per_cpu(pfair_state, cpu);
27634 -		state->cpu 	  = cpu;
27635 +		state->topology.id = cpu;
27636  		state->cur_tick   = 0;
27637  		state->local_tick = 0;
27638  		state->linked     = NULL;
27639 @@ -881,13 +1009,29 @@ static int __init init_pfair(void)
27640  		pstate[cpu] = state;
27641  	}
27642  
27643 -	rt_domain_init(&pfair, pfair_ready_order, NULL, NULL);
27644 -	return register_sched_plugin(&pfair_plugin);
27645 +	pfair_clusters = NULL;
27646 +	num_pfair_clusters = 0;
27647 +
27648 +	err = register_sched_plugin(&pfair_plugin);
27649 +	if (!err) {
27650 +		fs = make_plugin_proc_dir(&pfair_plugin, &pfair_dir);
27651 +		if (!fs)
27652 +			cluster_file = create_cluster_file(pfair_dir, &pfair_cluster_level);
27653 +		else
27654 +			printk(KERN_ERR "Could not allocate PFAIR procfs dir.\n");
27655 +	}
27656 +
27657 +	return err;
27658  }
27659  
27660  static void __exit clean_pfair(void)
27661  {
27662  	kfree(pstate);
27663 +
27664 +	if (cluster_file)
27665 +		remove_proc_entry("cluster", pfair_dir);
27666 +	if (pfair_dir)
27667 +		remove_plugin_proc_dir(&pfair_plugin);
27668  }
27669  
27670  module_init(init_pfair);
27671 diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
27672 index d912a64..d54886d 100644
27673 --- a/litmus/sched_plugin.c
27674 +++ b/litmus/sched_plugin.c
27675 @@ -121,23 +121,12 @@ static long litmus_dummy_deactivate_plugin(void)
27676  	return 0;
27677  }
27678  
27679 -#ifdef CONFIG_FMLP
27680 +#ifdef CONFIG_LITMUS_LOCKING
27681  
27682 -static long litmus_dummy_inherit_priority(struct pi_semaphore *sem,
27683 -					  struct task_struct *new_owner)
27684 +static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type,
27685 +				       void* __user config)
27686  {
27687 -	return -ENOSYS;
27688 -}
27689 -
27690 -static long litmus_dummy_return_priority(struct pi_semaphore *sem)
27691 -{
27692 -	return -ENOSYS;
27693 -}
27694 -
27695 -static long litmus_dummy_pi_block(struct pi_semaphore *sem,
27696 -				  struct task_struct *new_waiter)
27697 -{
27698 -	return -ENOSYS;
27699 +	return -ENXIO;
27700  }
27701  
27702  #endif
27703 @@ -158,10 +147,8 @@ struct sched_plugin linux_sched_plugin = {
27704  	.finish_switch = litmus_dummy_finish_switch,
27705  	.activate_plugin = litmus_dummy_activate_plugin,
27706  	.deactivate_plugin = litmus_dummy_deactivate_plugin,
27707 -#ifdef CONFIG_FMLP
27708 -	.inherit_priority = litmus_dummy_inherit_priority,
27709 -	.return_priority = litmus_dummy_return_priority,
27710 -	.pi_block = litmus_dummy_pi_block,
27711 +#ifdef CONFIG_LITMUS_LOCKING
27712 +	.allocate_lock = litmus_dummy_allocate_lock,
27713  #endif
27714  	.admit_task = litmus_dummy_admit_task
27715  };
27716 @@ -198,10 +185,8 @@ int register_sched_plugin(struct sched_plugin* plugin)
27717  	CHECK(complete_job);
27718  	CHECK(activate_plugin);
27719  	CHECK(deactivate_plugin);
27720 -#ifdef CONFIG_FMLP
27721 -	CHECK(inherit_priority);
27722 -	CHECK(return_priority);
27723 -	CHECK(pi_block);
27724 +#ifdef CONFIG_LITMUS_LOCKING
27725 +	CHECK(allocate_lock);
27726  #endif
27727  	CHECK(admit_task);
27728  
27729 diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
27730 index b89823d..abb06fa 100644
27731 --- a/litmus/sched_psn_edf.c
27732 +++ b/litmus/sched_psn_edf.c
27733 @@ -71,6 +71,66 @@ static void preempt(psnedf_domain_t *pedf)
27734  	preempt_if_preemptable(pedf->scheduled, pedf->cpu);
27735  }
27736  
27737 +#ifdef CONFIG_LITMUS_LOCKING
27738 +
27739 +static void boost_priority(struct task_struct* t)
27740 +{
27741 +	unsigned long		flags;
27742 +	psnedf_domain_t* 	pedf = task_pedf(t);
27743 +	lt_t			now;
27744 +
27745 +	raw_spin_lock_irqsave(&pedf->slock, flags);
27746 +	now = litmus_clock();
27747 +
27748 +	TRACE_TASK(t, "priority boosted at %llu\n", now);
27749 +
27750 +	tsk_rt(t)->priority_boosted = 1;
27751 +	tsk_rt(t)->boost_start_time = now;
27752 +
27753 +	if (pedf->scheduled != t) {
27754 +		/* holder may be queued: first stop queue changes */
27755 +		raw_spin_lock(&pedf->domain.release_lock);
27756 +		if (is_queued(t) &&
27757 +		    /* If it is queued, then we need to re-order. */
27758 +		    bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
27759 +		    /* If we bubbled to the top, then we need to check for preemptions. */
27760 +		    edf_preemption_needed(&pedf->domain, pedf->scheduled))
27761 +				preempt(pedf);
27762 +		raw_spin_unlock(&pedf->domain.release_lock);
27763 +	} /* else: nothing to do since the job is not queued while scheduled */
27764 +
27765 +	raw_spin_unlock_irqrestore(&pedf->slock, flags);
27766 +}
27767 +
27768 +static void unboost_priority(struct task_struct* t)
27769 +{
27770 +	unsigned long		flags;
27771 +	psnedf_domain_t* 	pedf = task_pedf(t);
27772 +	lt_t			now;
27773 +
27774 +	raw_spin_lock_irqsave(&pedf->slock, flags);
27775 +	now = litmus_clock();
27776 +
27777 +	/* assumption: this only happens when the job is scheduled */
27778 +	BUG_ON(pedf->scheduled != t);
27779 +
27780 +	TRACE_TASK(t, "priority restored at %llu\n", now);
27781 +
27782 +	/* priority boosted jobs must be scheduled */
27783 +	BUG_ON(pedf->scheduled != t);
27784 +
27785 +	tsk_rt(t)->priority_boosted = 0;
27786 +	tsk_rt(t)->boost_start_time = 0;
27787 +
27788 +	/* check if this changes anything */
27789 +	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
27790 +		preempt(pedf);
27791 +
27792 +	raw_spin_unlock_irqrestore(&pedf->slock, flags);
27793 +}
27794 +
27795 +#endif
27796 +
27797  /* This check is trivial in partioned systems as we only have to consider
27798   * the CPU of the partition.
27799   */
27800 @@ -252,15 +312,16 @@ static void psnedf_task_wake_up(struct task_struct *task)
27801  	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
27802  	raw_spin_lock_irqsave(&pedf->slock, flags);
27803  	BUG_ON(is_queued(task));
27804 +	now = litmus_clock();
27805 +	if (is_tardy(task, now)
27806 +#ifdef CONFIG_LITMUS_LOCKING
27807  	/* We need to take suspensions because of semaphores into
27808  	 * account! If a job resumes after being suspended due to acquiring
27809  	 * a semaphore, it should never be treated as a new job release.
27810 -	 *
27811 -	 * FIXME: This should be done in some more predictable and userspace-controlled way.
27812  	 */
27813 -	now = litmus_clock();
27814 -	if (is_tardy(task, now) &&
27815 -	    get_rt_flags(task) != RT_F_EXIT_SEM) {
27816 +	    && !is_priority_boosted(task)
27817 +#endif
27818 +		) {
27819  		/* new sporadic release */
27820  		release_at(task, now);
27821  		sched_trace_task_release(task);
27822 @@ -309,128 +370,213 @@ static void psnedf_task_exit(struct task_struct * t)
27823  	raw_spin_unlock_irqrestore(&pedf->slock, flags);
27824  }
27825  
27826 -#ifdef CONFIG_FMLP
27827 -static long psnedf_pi_block(struct pi_semaphore *sem,
27828 -			    struct task_struct *new_waiter)
27829 +#ifdef CONFIG_LITMUS_LOCKING
27830 +
27831 +#include <litmus/fdso.h>
27832 +#include <litmus/srp.h>
27833 +
27834 +/* ******************** SRP support ************************ */
27835 +
27836 +static unsigned int psnedf_get_srp_prio(struct task_struct* t)
27837 +{
27838 +	/* assumes implicit deadlines */
27839 +	return get_rt_period(t);
27840 +}
27841 +
27842 +static long psnedf_activate_plugin(void)
27843  {
27844 -	psnedf_domain_t* 	pedf;
27845 -	rt_domain_t*		edf;
27846 -	struct task_struct*	t;
27847 -	int cpu  = get_partition(new_waiter);
27848 -
27849 -	BUG_ON(!new_waiter);
27850 -
27851 -	if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) {
27852 -		TRACE_TASK(new_waiter, " boosts priority\n");
27853 -		pedf = task_pedf(new_waiter);
27854 -		edf  = task_edf(new_waiter);
27855 -
27856 -		/* interrupts already disabled */
27857 -		raw_spin_lock(&pedf->slock);
27858 -
27859 -		/* store new highest-priority task */
27860 -		sem->hp.cpu_task[cpu] = new_waiter;
27861 -		if (sem->holder &&
27862 -		    get_partition(sem->holder) == get_partition(new_waiter)) {
27863 -			/* let holder inherit */
27864 -			sem->holder->rt_param.inh_task = new_waiter;
27865 -			t = sem->holder;
27866 -			if (is_queued(t)) {
27867 -				/* queued in domain*/
27868 -				remove(edf, t);
27869 -				/* readd to make priority change take place */
27870 -				/* FIXME: this looks outdated */
27871 -				if (is_released(t, litmus_clock()))
27872 -					__add_ready(edf, t);
27873 -				else
27874 -					add_release(edf, t);
27875 -			}
27876 -		}
27877 +	get_srp_prio = psnedf_get_srp_prio;
27878 +	return 0;
27879 +}
27880  
27881 -		/* check if we need to reschedule */
27882 -		if (edf_preemption_needed(edf, current))
27883 -			preempt(pedf);
27884 +/* ******************** FMLP support ********************** */
27885  
27886 -		raw_spin_unlock(&pedf->slock);
27887 +/* struct for semaphore with priority inheritance */
27888 +struct fmlp_semaphore {
27889 +	struct litmus_lock litmus_lock;
27890 +
27891 +	/* current resource holder */
27892 +	struct task_struct *owner;
27893 +
27894 +	/* FIFO queue of waiting tasks */
27895 +	wait_queue_head_t wait;
27896 +};
27897 +
27898 +static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
27899 +{
27900 +	return container_of(lock, struct fmlp_semaphore, litmus_lock);
27901 +}
27902 +int psnedf_fmlp_lock(struct litmus_lock* l)
27903 +{
27904 +	struct task_struct* t = current;
27905 +	struct fmlp_semaphore *sem = fmlp_from_lock(l);
27906 +	wait_queue_t wait;
27907 +	unsigned long flags;
27908 +
27909 +	if (!is_realtime(t))
27910 +		return -EPERM;
27911 +
27912 +	spin_lock_irqsave(&sem->wait.lock, flags);
27913 +
27914 +	if (sem->owner) {
27915 +		/* resource is not free => must suspend and wait */
27916 +
27917 +		init_waitqueue_entry(&wait, t);
27918 +
27919 +		/* FIXME: interruptible would be nice some day */
27920 +		set_task_state(t, TASK_UNINTERRUPTIBLE);
27921 +
27922 +		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
27923 +
27924 +		/* release lock before sleeping */
27925 +		spin_unlock_irqrestore(&sem->wait.lock, flags);
27926 +
27927 +		/* We depend on the FIFO order.  Thus, we don't need to recheck
27928 +		 * when we wake up; we are guaranteed to have the lock since
27929 +		 * there is only one wake up per release.
27930 +		 */
27931 +
27932 +		schedule();
27933 +
27934 +		/* Since we hold the lock, no other task will change
27935 +		 * ->owner. We can thus check it without acquiring the spin
27936 +		 * lock. */
27937 +		BUG_ON(sem->owner != t);
27938 +
27939 +		/* FIXME: could we punt the dequeuing to the previous job,
27940 +		 * which is holding the spinlock anyway? */
27941 +		remove_wait_queue(&sem->wait, &wait);
27942 +	} else {
27943 +		/* it's ours now */
27944 +		sem->owner = t;
27945 +
27946 +		/* mark the task as priority-boosted. */
27947 +		boost_priority(t);
27948 +
27949 +		spin_unlock_irqrestore(&sem->wait.lock, flags);
27950  	}
27951  
27952  	return 0;
27953  }
27954  
27955 -static long psnedf_inherit_priority(struct pi_semaphore *sem,
27956 -				    struct task_struct *new_owner)
27957 +int psnedf_fmlp_unlock(struct litmus_lock* l)
27958  {
27959 -	int cpu  = get_partition(new_owner);
27960 -
27961 -	new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu];
27962 -	if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) {
27963 -		TRACE_TASK(new_owner,
27964 -			   "inherited priority from %s/%d\n",
27965 -			   sem->hp.cpu_task[cpu]->comm,
27966 -			   sem->hp.cpu_task[cpu]->pid);
27967 +	struct task_struct *t = current, *next;
27968 +	struct fmlp_semaphore *sem = fmlp_from_lock(l);
27969 +	unsigned long flags;
27970 +	int err = 0;
27971 +
27972 +	spin_lock_irqsave(&sem->wait.lock, flags);
27973 +
27974 +	if (sem->owner != t) {
27975 +		err = -EINVAL;
27976 +		goto out;
27977 +	}
27978 +
27979 +	/* we lose the benefit of priority boosting */
27980 +
27981 +	unboost_priority(t);
27982 +
27983 +	/* check if there are jobs waiting for this resource */
27984 +	next = waitqueue_first(&sem->wait);
27985 +	if (next) {
27986 +		/* boost next job */
27987 +		boost_priority(next);
27988 +
+		/* next becomes the resource holder */
27990 +		sem->owner = next;
27991 +
27992 +		/* wake up next */
27993 +		wake_up_process(next);
27994  	} else
27995 -		TRACE_TASK(new_owner,
27996 -			   "cannot inherit priority: "
27997 -			   "no higher priority job waits on this CPU!\n");
27998 -	/* make new owner non-preemptable as required by FMLP under
27999 -	 * PSN-EDF.
28000 -	 */
28001 -	make_np(new_owner);
28002 -	return 0;
28003 -}
28004 +		/* resource becomes available */
28005 +		sem->owner = NULL;
28006  
28007 +out:
28008 +	spin_unlock_irqrestore(&sem->wait.lock, flags);
28009 +	return err;
28010 +}
28011  
28012 -/* This function is called on a semaphore release, and assumes that
28013 - * the current task is also the semaphore holder.
28014 - */
28015 -static long psnedf_return_priority(struct pi_semaphore *sem)
28016 +int psnedf_fmlp_close(struct litmus_lock* l)
28017  {
28018 -	struct task_struct* 	t    = current;
28019 -	psnedf_domain_t* 	pedf = task_pedf(t);
28020 -	rt_domain_t*		edf  = task_edf(t);
28021 -	int 			ret  = 0;
28022 -	int			cpu  = get_partition(current);
28023 -	int still_np;
28024 +	struct task_struct *t = current;
28025 +	struct fmlp_semaphore *sem = fmlp_from_lock(l);
28026 +	unsigned long flags;
28027  
28028 +	int owner;
28029  
28030 -        /* Find new highest-priority semaphore task
28031 -	 * if holder task is the current hp.cpu_task[cpu].
28032 -	 *
28033 -	 * Calling function holds sem->wait.lock.
28034 -	 */
28035 -	if (t == sem->hp.cpu_task[cpu])
28036 -		edf_set_hp_cpu_task(sem, cpu);
28037 +	spin_lock_irqsave(&sem->wait.lock, flags);
28038  
28039 -	still_np = take_np(current);
28040 +	owner = sem->owner == t;
28041  
28042 -	/* Since we don't nest resources, this
28043 -	 * should always be zero */
28044 -	BUG_ON(still_np);
28045 +	spin_unlock_irqrestore(&sem->wait.lock, flags);
28046  
28047 -	if (current->rt_param.inh_task) {
28048 -		TRACE_CUR("return priority of %s/%d\n",
28049 -			  current->rt_param.inh_task->comm,
28050 -			  current->rt_param.inh_task->pid);
28051 -	} else
28052 -		TRACE_CUR(" no priority to return %p\n", sem);
28053 +	if (owner)
28054 +		psnedf_fmlp_unlock(l);
28055  
28056 +	return 0;
28057 +}
28058  
28059 -	/* Always check for delayed preemptions that might have become
28060 -	 * necessary due to non-preemptive execution.
28061 -	 */
28062 -	raw_spin_lock(&pedf->slock);
28063 +void psnedf_fmlp_free(struct litmus_lock* lock)
28064 +{
28065 +	kfree(fmlp_from_lock(lock));
28066 +}
28067  
28068 -	/* Reset inh_task to NULL. */
28069 -	current->rt_param.inh_task = NULL;
28070 +static struct litmus_lock_ops psnedf_fmlp_lock_ops = {
28071 +	.close  = psnedf_fmlp_close,
28072 +	.lock   = psnedf_fmlp_lock,
28073 +	.unlock = psnedf_fmlp_unlock,
28074 +	.deallocate = psnedf_fmlp_free,
28075 +};
28076  
28077 -	/* check if we need to reschedule */
28078 -	if (edf_preemption_needed(edf, current))
28079 -		preempt(pedf);
28080 +static struct litmus_lock* psnedf_new_fmlp(void)
28081 +{
28082 +	struct fmlp_semaphore* sem;
28083  
28084 -	raw_spin_unlock(&pedf->slock);
28085 +	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
28086 +	if (!sem)
28087 +		return NULL;
28088  
28089 +	sem->owner   = NULL;
28090 +	init_waitqueue_head(&sem->wait);
28091 +	sem->litmus_lock.ops = &psnedf_fmlp_lock_ops;
28092  
28093 -	return ret;
28094 +	return &sem->litmus_lock;
28095 +}
28096 +
28097 +/* **** lock constructor **** */
28098 +
28099 +
28100 +static long psnedf_allocate_lock(struct litmus_lock **lock, int type,
28101 +				 void* __user unused)
28102 +{
28103 +	int err = -ENXIO;
28104 +	struct srp_semaphore* srp;
28105 +
28106 +	/* PSN-EDF currently supports the SRP for local resources and the FMLP
28107 +	 * for global resources. */
28108 +	switch (type) {
28109 +	case FMLP_SEM:
28110 +		/* Flexible Multiprocessor Locking Protocol */
28111 +		*lock = psnedf_new_fmlp();
28112 +		if (*lock)
28113 +			err = 0;
28114 +		else
28115 +			err = -ENOMEM;
28116 +		break;
28117 +
28118 +	case SRP_SEM:
28119 +		/* Baker's Stack Resource Policy */
28120 +		srp = allocate_srp_semaphore();
28121 +		if (srp) {
28122 +			*lock = &srp->litmus_lock;
28123 +			err = 0;
28124 +		} else
28125 +			err = -ENOMEM;
28126 +		break;
28127 +	};
28128 +
28129 +	return err;
28130  }
28131  
28132  #endif
28133 @@ -443,9 +589,6 @@ static long psnedf_admit_task(struct task_struct* tsk)
28134  /*	Plugin object	*/
28135  static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = {
28136  	.plugin_name		= "PSN-EDF",
28137 -#ifdef CONFIG_SRP
28138 -	.srp_active		= 1,
28139 -#endif
28140  	.tick			= psnedf_tick,
28141  	.task_new		= psnedf_task_new,
28142  	.complete_job		= complete_job,
28143 @@ -453,13 +596,11 @@ static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = {
28144  	.schedule		= psnedf_schedule,
28145  	.task_wake_up		= psnedf_task_wake_up,
28146  	.task_block		= psnedf_task_block,
28147 -#ifdef CONFIG_FMLP
28148 -	.fmlp_active		= 1,
28149 -	.pi_block		= psnedf_pi_block,
28150 -	.inherit_priority	= psnedf_inherit_priority,
28151 -	.return_priority	= psnedf_return_priority,
28152 +	.admit_task		= psnedf_admit_task,
28153 +#ifdef CONFIG_LITMUS_LOCKING
28154 +	.allocate_lock		= psnedf_allocate_lock,
28155 +	.activate_plugin	= psnedf_activate_plugin,
28156  #endif
28157 -	.admit_task		= psnedf_admit_task
28158  };
28159  
28160  
28161 diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
28162 index a15b25d..f923280 100644
28163 --- a/litmus/sched_task_trace.c
28164 +++ b/litmus/sched_task_trace.c
28165 @@ -14,7 +14,11 @@
28166  #include <litmus/sched_trace.h>
28167  #include <litmus/feather_trace.h>
28168  #include <litmus/ftdev.h>
28169 +#include <litmus/rt_domain.h>
28170 +#include <litmus/domain.h>
28171 +#include <litmus/event_group.h>
28172  
28173 +#include <litmus/sched_mc.h>
28174  
28175  #define NO_EVENTS		(1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
28176  
28177 @@ -131,6 +135,8 @@ feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long
28178  		rec->data.param.period    = get_rt_period(t);
28179  		rec->data.param.phase     = get_rt_phase(t);
28180  		rec->data.param.partition = get_partition(t);
28181 +		rec->data.param.class     = get_class(t);
28182 +		rec->data.param.level     = (tsk_mc_data(t) ? tsk_mc_crit(t) : -1);
28183  		put_record(rec);
28184  	}
28185  }
28186 @@ -140,8 +146,8 @@ feather_callback void do_sched_trace_task_release(unsigned long id, unsigned lon
28187  	struct task_struct *t = (struct task_struct*) _task;
28188  	struct st_event_record* rec = get_record(ST_RELEASE, t);
28189  	if (rec) {
28190 -		rec->data.release.release  = get_release(t);
28191 -		rec->data.release.deadline = get_deadline(t);
28192 +		rec->data.release.release  = tsk_rt(t)->job_params.real_release;
28193 +		rec->data.release.deadline = tsk_rt(t)->job_params.real_deadline;
28194  		put_record(rec);
28195  	}
28196  }
28197 @@ -224,3 +230,17 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
28198  		put_record(rec);
28199  	}
28200  }
28201 +
28202 +feather_callback void do_sched_trace_action(unsigned long id,
28203 +					    unsigned long _task,
28204 +					    unsigned long action)
28205 +{
28206 +	struct task_struct *t = (struct task_struct*) _task;
28207 +	struct st_event_record* rec = get_record(ST_ACTION, t);
28208 +
28209 +	if (rec) {
28210 +		rec->data.action.when   = now();
28211 +		rec->data.action.action = action;
28212 +		put_record(rec);
28213 +	}
28214 +}
28215 diff --git a/litmus/srp.c b/litmus/srp.c
28216 index cb57759..2ed4ec1 100644
28217 --- a/litmus/srp.c
28218 +++ b/litmus/srp.c
28219 @@ -12,42 +12,25 @@
28220  #include <litmus/trace.h>
28221  
28222  
28223 -#ifdef CONFIG_SRP
28224 +#ifdef CONFIG_LITMUS_LOCKING
28225  
28226 -struct srp_priority {
28227 -	struct list_head	list;
28228 -        unsigned int 		period;
28229 -	pid_t			pid;
28230 -};
28231 +#include <litmus/srp.h>
28232  
28233 -#define list2prio(l) list_entry(l, struct srp_priority, list)
28234 -
28235 -/* SRP task priority comparison function. Smaller periods have highest
28236 - * priority, tie-break is PID. Special case: period == 0 <=> no priority
28237 - */
28238 -static int srp_higher_prio(struct srp_priority* first,
28239 -			   struct srp_priority* second)
28240 -{
28241 -	if (!first->period)
28242 -		return 0;
28243 -	else
28244 -		return  !second->period ||
28245 -			first->period < second->period || (
28246 -			first->period == second->period &&
28247 -			first->pid < second->pid);
28248 -}
28249 +srp_prioritization_t get_srp_prio;
28250  
28251  struct srp {
28252  	struct list_head	ceiling;
28253  	wait_queue_head_t	ceiling_blocked;
28254  };
28255 +#define system_ceiling(srp) list2prio(srp->ceiling.next)
28256 +#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
28257  
28258 +#define UNDEF_SEM -2
28259  
28260  atomic_t srp_objects_in_use = ATOMIC_INIT(0);
28261  
28262  DEFINE_PER_CPU(struct srp, srp);
28263  
28264 -
28265  /* Initialize SRP semaphores at boot time. */
28266  static int __init srp_init(void)
28267  {
28268 @@ -64,30 +47,35 @@ static int __init srp_init(void)
28269  }
28270  module_init(srp_init);
28271  
28272 +/* SRP task priority comparison function. Smaller numeric values have higher
28273 + * priority, tie-break is PID. Special case: priority == 0 <=> no priority
28274 + */
28275 +static int srp_higher_prio(struct srp_priority* first,
28276 +			   struct srp_priority* second)
28277 +{
28278 +	if (!first->priority)
28279 +		return 0;
28280 +	else
28281 +		return  !second->priority ||
28282 +			first->priority < second->priority || (
28283 +			first->priority == second->priority &&
28284 +			first->pid < second->pid);
28285 +}
28286  
28287 -#define system_ceiling(srp) list2prio(srp->ceiling.next)
28288 -
28289 -
28290 -#define UNDEF_SEM -2
28291 -
28292 -
28293 -/* struct for uniprocessor SRP "semaphore" */
28294 -struct srp_semaphore {
28295 -	struct srp_priority ceiling;
28296 -	struct task_struct* owner;
28297 -	int cpu; /* cpu associated with this "semaphore" and resource */
28298 -};
28299 -
28300 -#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling)
28301  
28302  static int srp_exceeds_ceiling(struct task_struct* first,
28303  			       struct srp* srp)
28304  {
28305 -	return list_empty(&srp->ceiling) ||
28306 -	       get_rt_period(first) < system_ceiling(srp)->period ||
28307 -	       (get_rt_period(first) == system_ceiling(srp)->period &&
28308 -		first->pid < system_ceiling(srp)->pid) ||
28309 -		ceiling2sem(system_ceiling(srp))->owner == first;
28310 +	struct srp_priority prio;
28311 +
28312 +	if (list_empty(&srp->ceiling))
28313 +		return 1;
28314 +	else {
28315 +		prio.pid = first->pid;
28316 +		prio.priority = get_srp_prio(first);
28317 +		return srp_higher_prio(&prio, system_ceiling(srp)) ||
28318 +			ceiling2sem(system_ceiling(srp))->owner == first;
28319 +	}
28320  }
28321  
28322  static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
28323 @@ -108,134 +96,139 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
28324  }
28325  
28326  
28327 -static void* create_srp_semaphore(void)
28328 +static int lock_srp_semaphore(struct litmus_lock* l)
28329  {
28330 -	struct srp_semaphore* sem;
28331 +	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
28332  
28333 -	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
28334 -	if (!sem)
28335 -		return NULL;
28336 +	if (!is_realtime(current))
28337 +		return -EPERM;
28338  
28339 -	INIT_LIST_HEAD(&sem->ceiling.list);
28340 -	sem->ceiling.period = 0;
28341 -	sem->cpu     = UNDEF_SEM;
28342 -	sem->owner   = NULL;
28343 -	atomic_inc(&srp_objects_in_use);
28344 -	return sem;
28345 +	preempt_disable();
28346 +
28347 +	/* Update ceiling. */
28348 +	srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
28349 +
28350 +	/* SRP invariant: all resources available */
28351 +	BUG_ON(sem->owner != NULL);
28352 +
28353 +	sem->owner = current;
28354 +	TRACE_CUR("acquired srp 0x%p\n", sem);
28355 +
28356 +	preempt_enable();
28357 +
28358 +	return 0;
28359  }
28360  
28361 -static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg)
28362 +static int unlock_srp_semaphore(struct litmus_lock* l)
28363  {
28364 -	struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj;
28365 -	int ret = 0;
28366 -	struct task_struct* t = current;
28367 -	struct srp_priority t_prio;
28368 +	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
28369 +	int err = 0;
28370  
28371 -	TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
28372 -	if (!srp_active())
28373 -		return -EBUSY;
28374 +	preempt_disable();
28375  
28376 -	if (sem->cpu == UNDEF_SEM)
28377 -		sem->cpu = get_partition(t);
28378 -	else if (sem->cpu != get_partition(t))
28379 -		ret = -EPERM;
28380 +	if (sem->owner != current) {
28381 +		err = -EINVAL;
28382 +	} else {
28383 +		/* Determine new system priority ceiling for this CPU. */
28384 +		BUG_ON(!in_list(&sem->ceiling.list));
28385  
28386 -	if (ret == 0) {
28387 -		t_prio.period = get_rt_period(t);
28388 -		t_prio.pid    = t->pid;
28389 -		if (srp_higher_prio(&t_prio, &sem->ceiling)) {
28390 -			sem->ceiling.period = t_prio.period;
28391 -			sem->ceiling.pid    = t_prio.pid;
28392 -		}
28393 +		list_del(&sem->ceiling.list);
28394 +		sem->owner = NULL;
28395 +
28396 +		/* Wake tasks on this CPU, if they exceed current ceiling. */
28397 +		TRACE_CUR("released srp 0x%p\n", sem);
28398 +		wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
28399  	}
28400  
28401 -	return ret;
28402 +	preempt_enable();
28403 +	return err;
28404  }
28405  
28406 -static void destroy_srp_semaphore(void* sem)
28407 +static int open_srp_semaphore(struct litmus_lock* l, void* __user arg)
28408  {
28409 -	/* XXX invariants */
28410 -	atomic_dec(&srp_objects_in_use);
28411 -	kfree(sem);
28412 -}
28413 +	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
28414 +	int err = 0;
28415 +	struct task_struct* t = current;
28416 +	struct srp_priority t_prio;
28417  
28418 -struct fdso_ops srp_sem_ops = {
28419 -	.create  = create_srp_semaphore,
28420 -	.open    = open_srp_semaphore,
28421 -	.destroy = destroy_srp_semaphore
28422 -};
28423 +	if (!is_realtime(t))
28424 +		return -EPERM;
28425  
28426 +	TRACE_CUR("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu);
28427  
28428 -static void do_srp_down(struct srp_semaphore* sem)
28429 -{
28430 -	/* Update ceiling. */
28431 -	srp_add_prio(&__get_cpu_var(srp), &sem->ceiling);
28432 -	WARN_ON(sem->owner != NULL);
28433 -	sem->owner = current;
28434 -	TRACE_CUR("acquired srp 0x%p\n", sem);
28435 -}
28436 +	preempt_disable();
28437  
28438 -static void do_srp_up(struct srp_semaphore* sem)
28439 -{
28440 -	/* Determine new system priority ceiling for this CPU. */
28441 -	WARN_ON(!in_list(&sem->ceiling.list));
28442 -	if (in_list(&sem->ceiling.list))
28443 -		list_del(&sem->ceiling.list);
28444 +	if (sem->owner != NULL)
28445 +		err = -EBUSY;
28446 +
28447 +	if (err == 0) {
28448 +		if (sem->cpu == UNDEF_SEM)
28449 +			sem->cpu = get_partition(t);
28450 +		else if (sem->cpu != get_partition(t))
28451 +			err = -EPERM;
28452 +	}
28453 +
28454 +	if (err == 0) {
28455 +		t_prio.priority = get_srp_prio(t);
28456 +		t_prio.pid      = t->pid;
28457 +		if (srp_higher_prio(&t_prio, &sem->ceiling)) {
28458 +			sem->ceiling.priority = t_prio.priority;
28459 +			sem->ceiling.pid      = t_prio.pid;
28460 +		}
28461 +	}
28462  
28463 -	sem->owner = NULL;
28464 +	preempt_enable();
28465  
28466 -	/* Wake tasks on this CPU, if they exceed current ceiling. */
28467 -	TRACE_CUR("released srp 0x%p\n", sem);
28468 -	wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
28469 +	return err;
28470  }
28471  
28472 -/* Adjust the system-wide priority ceiling if resource is claimed. */
28473 -asmlinkage long sys_srp_down(int sem_od)
28474 +static int close_srp_semaphore(struct litmus_lock* l)
28475  {
28476 -	int cpu;
28477 -	int ret = -EINVAL;
28478 -	struct srp_semaphore* sem;
28479 +	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
28480 +	int err = 0;
28481  
28482 -	/* disabling preemptions is sufficient protection since
28483 -	 * SRP is strictly per CPU and we don't interfere with any
28484 -	 * interrupt handlers
28485 -	 */
28486  	preempt_disable();
28487 -	TS_SRP_DOWN_START;
28488  
28489 -	cpu = smp_processor_id();
28490 -	sem = lookup_srp_sem(sem_od);
28491 -	if (sem && sem->cpu == cpu) {
28492 -		do_srp_down(sem);
28493 -		ret = 0;
28494 -	}
28495 +	if (sem->owner == current)
28496 +		unlock_srp_semaphore(l);
28497  
28498 -	TS_SRP_DOWN_END;
28499  	preempt_enable();
28500 -	return ret;
28501 +
28502 +	return err;
28503  }
28504  
28505 -/* Adjust the system-wide priority ceiling if resource is freed. */
28506 -asmlinkage long sys_srp_up(int sem_od)
28507 +static void deallocate_srp_semaphore(struct litmus_lock* l)
28508 +{
28509 +	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
28510 +	atomic_dec(&srp_objects_in_use);
28511 +	kfree(sem);
28512 +}
28513 +
28514 +static struct litmus_lock_ops srp_lock_ops = {
28515 +	.open   = open_srp_semaphore,
28516 +	.close  = close_srp_semaphore,
28517 +	.lock   = lock_srp_semaphore,
28518 +	.unlock = unlock_srp_semaphore,
28519 +	.deallocate = deallocate_srp_semaphore,
28520 +};
28521 +
28522 +struct srp_semaphore* allocate_srp_semaphore(void)
28523  {
28524 -	int cpu;
28525 -	int ret = -EINVAL;
28526  	struct srp_semaphore* sem;
28527  
28528 -	preempt_disable();
28529 -	TS_SRP_UP_START;
28530 +	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
28531 +	if (!sem)
28532 +		return NULL;
28533  
28534 -	cpu = smp_processor_id();
28535 -	sem = lookup_srp_sem(sem_od);
28536 +	INIT_LIST_HEAD(&sem->ceiling.list);
28537 +	sem->ceiling.priority = 0;
28538 +	sem->cpu     = UNDEF_SEM;
28539 +	sem->owner   = NULL;
28540  
28541 -	if (sem && sem->cpu == cpu) {
28542 -		do_srp_up(sem);
28543 -		ret = 0;
28544 -	}
28545 +	sem->litmus_lock.ops = &srp_lock_ops;
28546  
28547 -	TS_SRP_UP_END;
28548 -	preempt_enable();
28549 -	return ret;
28550 +	atomic_inc(&srp_objects_in_use);
28551 +	return sem;
28552  }
28553  
28554  static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
28555 @@ -251,8 +244,6 @@ static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync,
28556  	return 0;
28557  }
28558  
28559 -
28560 -
28561  static void do_ceiling_block(struct task_struct *tsk)
28562  {
28563  	wait_queue_t wait = {
28564 @@ -272,6 +263,7 @@ static void do_ceiling_block(struct task_struct *tsk)
28565  }
28566  
28567  /* Wait for current task priority to exceed system-wide priority ceiling.
28568 + * FIXME: the hotpath should be inline.
28569   */
28570  void srp_ceiling_block(void)
28571  {
28572 @@ -300,19 +292,4 @@ void srp_ceiling_block(void)
28573  	preempt_enable();
28574  }
28575  
28576 -
28577 -#else
28578 -
28579 -asmlinkage long sys_srp_down(int sem_od)
28580 -{
28581 -	return -ENOSYS;
28582 -}
28583 -
28584 -asmlinkage long sys_srp_up(int sem_od)
28585 -{
28586 -	return -ENOSYS;
28587 -}
28588 -
28589 -struct fdso_ops srp_sem_ops = {};
28590 -
28591  #endif
28592 diff --git a/litmus/trace.c b/litmus/trace.c
28593 index e7ea1c2..209524f 100644
28594 --- a/litmus/trace.c
28595 +++ b/litmus/trace.c
28596 @@ -5,6 +5,10 @@
28597  #include <litmus/litmus.h>
28598  #include <litmus/trace.h>
28599  
28600 +#include <litmus/domain.h>
28601 +#include <litmus/event_group.h>
28602 +#include <litmus/sched_mc.h>
28603 +
28604  /******************************************************************************/
28605  /*                          Allocation                                        */
28606  /******************************************************************************/
28607 @@ -37,6 +41,36 @@ static inline void __save_timestamp(unsigned long event,
28608  	__save_timestamp_cpu(event, type, raw_smp_processor_id());
28609  }
28610  
28611 +/* hack: fake timestamp to user-reported time, and record parts of the PID */
28612 +feather_callback void save_timestamp_time(unsigned long event, unsigned long ptr)
28613 +{
28614 +	uint64_t* time = (uint64_t*) ptr;
28615 +	unsigned int seq_no;
28616 +	struct timestamp *ts;
28617 +	seq_no = fetch_and_inc((int *) &ts_seq_no);
28618 +	if (ft_buffer_start_write(trace_ts_buf, (void**)  &ts)) {
28619 +		ts->event     = event;
28620 +		ts->timestamp = *time;
28621 +		ts->seq_no    = seq_no;
28622 +		/* type takes lowest byte of PID */
28623 +		ts->task_type = (uint8_t) current->pid;
28624 +		/* cpu takes second-lowest byte of PID */
28625 +		ts->cpu       = (uint8_t) (current->pid >> 8);
28626 +
28627 +		ft_buffer_finish_write(trace_ts_buf, ts);
28628 +	}
28629 +}
28630 +
28631 +feather_callback void save_timestamp_pid(unsigned long event)
28632 +{
28633 +	/* Abuse existing fields to partially export PID. */
28634 +	__save_timestamp_cpu(event,
28635 +			     /* type takes lowest byte of PID */
28636 +			     (uint8_t) current->pid,
28637 +			     /* cpu takes second-lowest byte of PID */
28638 +			     (uint8_t) (current->pid >> 8));
28639 +}
28640 +
28641  feather_callback void save_timestamp(unsigned long event)
28642  {
28643  	__save_timestamp(event, TSK_UNKNOWN);
28644 @@ -51,8 +85,21 @@ feather_callback void save_timestamp_def(unsigned long event,
28645  feather_callback void save_timestamp_task(unsigned long event,
28646  					  unsigned long t_ptr)
28647  {
28648 -	int rt = is_realtime((struct task_struct *) t_ptr);
28649 -	__save_timestamp(event, rt ? TSK_RT : TSK_BE);
28650 +	struct task_struct *ts = (struct task_struct*) t_ptr;
28651 +	int rt = is_realtime(ts);
28652 +	uint8_t type = rt ? TSK_RT : TSK_BE;
28653 +
28654 +	if (TS_LVLA_SCHED_END_ID == event) {
28655 +		if (rt && CRIT_LEVEL_A == tsk_mc_crit(ts))
28656 +			type = TSK_LVLA;
28657 +	} else if (TS_LVLB_SCHED_END_ID == event) {
28658 +		if (rt && CRIT_LEVEL_B == tsk_mc_crit(ts))
28659 +			type = TSK_LVLB;
28660 +	} else if (TS_LVLC_SCHED_END_ID == event) {
28661 +		if (rt && CRIT_LEVEL_C == tsk_mc_crit(ts))
28662 +			type = TSK_LVLC;
28663 +	}
28664 +	__save_timestamp(event, type);
28665  }
28666  
28667  feather_callback void save_timestamp_cpu(unsigned long event,
28668 @@ -69,7 +116,7 @@ feather_callback void save_timestamp_cpu(unsigned long event,
28669   * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER)
28670   * and we might not get as much
28671   */
28672 -#define NO_TIMESTAMPS (2 << 11)
28673 +#define NO_TIMESTAMPS (2 << 13)
28674  
28675  static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
28676  {
28677 diff --git a/mm/filemap.c b/mm/filemap.c
28678 index 3d4df44..9701a50 100644
28679 --- a/mm/filemap.c
28680 +++ b/mm/filemap.c
28681 @@ -631,7 +631,9 @@ repeat:
28682  	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
28683  	if (pagep) {
28684  		page = radix_tree_deref_slot(pagep);
28685 -		if (unlikely(!page || page == RADIX_TREE_RETRY))
28686 +		if (unlikely(!page))
28687 +			goto out;
28688 +		if (radix_tree_deref_retry(page))
28689  			goto repeat;
28690  
28691  		if (!page_cache_get_speculative(page))
28692 @@ -647,6 +649,7 @@ repeat:
28693  			goto repeat;
28694  		}
28695  	}
28696 +out:
28697  	rcu_read_unlock();
28698  
28699  	return page;
28700 @@ -764,12 +767,11 @@ repeat:
28701  		page = radix_tree_deref_slot((void **)pages[i]);
28702  		if (unlikely(!page))
28703  			continue;
28704 -		/*
28705 -		 * this can only trigger if nr_found == 1, making livelock
28706 -		 * a non issue.
28707 -		 */
28708 -		if (unlikely(page == RADIX_TREE_RETRY))
28709 +		if (radix_tree_deref_retry(page)) {
28710 +			if (ret)
28711 +				start = pages[ret-1]->index;
28712  			goto restart;
28713 +		}
28714  
28715  		if (!page_cache_get_speculative(page))
28716  			goto repeat;
28717 @@ -817,11 +819,7 @@ repeat:
28718  		page = radix_tree_deref_slot((void **)pages[i]);
28719  		if (unlikely(!page))
28720  			continue;
28721 -		/*
28722 -		 * this can only trigger if nr_found == 1, making livelock
28723 -		 * a non issue.
28724 -		 */
28725 -		if (unlikely(page == RADIX_TREE_RETRY))
28726 +		if (radix_tree_deref_retry(page))
28727  			goto restart;
28728  
28729  		if (page->mapping == NULL || page->index != index)
28730 @@ -874,11 +872,7 @@ repeat:
28731  		page = radix_tree_deref_slot((void **)pages[i]);
28732  		if (unlikely(!page))
28733  			continue;
28734 -		/*
28735 -		 * this can only trigger if nr_found == 1, making livelock
28736 -		 * a non issue.
28737 -		 */
28738 -		if (unlikely(page == RADIX_TREE_RETRY))
28739 +		if (radix_tree_deref_retry(page))
28740  			goto restart;
28741  
28742  		if (!page_cache_get_speculative(page))
28743 @@ -1016,6 +1010,9 @@ find_page:
28744  				goto page_not_up_to_date;
28745  			if (!trylock_page(page))
28746  				goto page_not_up_to_date;
28747 +			/* Did it get truncated before we got the lock? */
28748 +			if (!page->mapping)
28749 +				goto page_not_up_to_date_locked;
28750  			if (!mapping->a_ops->is_partially_uptodate(page,
28751  								desc, offset))
28752  				goto page_not_up_to_date_locked;
28753 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
28754 index c032738..2697806 100644
28755 --- a/mm/hugetlb.c
28756 +++ b/mm/hugetlb.c
28757 @@ -2380,8 +2380,11 @@ retry_avoidcopy:
28758  	 * When the original hugepage is shared one, it does not have
28759  	 * anon_vma prepared.
28760  	 */
28761 -	if (unlikely(anon_vma_prepare(vma)))
28762 +	if (unlikely(anon_vma_prepare(vma))) {
28763 +		/* Caller expects lock to be held */
28764 +		spin_lock(&mm->page_table_lock);
28765  		return VM_FAULT_OOM;
28766 +	}
28767  
28768  	copy_huge_page(new_page, old_page, address, vma);
28769  	__SetPageUptodate(new_page);
28770 @@ -2665,7 +2668,8 @@ out_page_table_lock:
28771  		unlock_page(pagecache_page);
28772  		put_page(pagecache_page);
28773  	}
28774 -	unlock_page(page);
28775 +	if (page != pagecache_page)
28776 +		unlock_page(page);
28777  
28778  out_mutex:
28779  	mutex_unlock(&hugetlb_instantiation_mutex);
28780 diff --git a/mm/internal.h b/mm/internal.h
28781 index 6a697bb..dedb0af 100644
28782 --- a/mm/internal.h
28783 +++ b/mm/internal.h
28784 @@ -62,7 +62,7 @@ extern bool is_free_buddy_page(struct page *page);
28785   */
28786  static inline unsigned long page_order(struct page *page)
28787  {
28788 -	VM_BUG_ON(!PageBuddy(page));
28789 +	/* PageBuddy() must be checked by the caller */
28790  	return page_private(page);
28791  }
28792  
28793 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
28794 index 9be3cf8..a9a534a 100644
28795 --- a/mm/memcontrol.c
28796 +++ b/mm/memcontrol.c
28797 @@ -269,13 +269,14 @@ enum move_type {
28798  
28799  /* "mc" and its members are protected by cgroup_mutex */
28800  static struct move_charge_struct {
28801 -	spinlock_t	  lock; /* for from, to, moving_task */
28802 +	spinlock_t	  lock; /* for from, to */
28803  	struct mem_cgroup *from;
28804  	struct mem_cgroup *to;
28805  	unsigned long precharge;
28806  	unsigned long moved_charge;
28807  	unsigned long moved_swap;
28808  	struct task_struct *moving_task;	/* a task moving charges */
28809 +	struct mm_struct *mm;
28810  	wait_queue_head_t waitq;		/* a waitq for other context */
28811  } mc = {
28812  	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
28813 @@ -1646,6 +1647,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
28814  		if (likely(!ret))
28815  			return CHARGE_OK;
28816  
28817 +		res_counter_uncharge(&mem->res, csize);
28818  		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
28819  		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
28820  	} else
28821 @@ -1729,19 +1731,18 @@ again:
28822  
28823  		rcu_read_lock();
28824  		p = rcu_dereference(mm->owner);
28825 -		VM_BUG_ON(!p);
28826  		/*
28827 -		 * because we don't have task_lock(), "p" can exit while
28828 -		 * we're here. In that case, "mem" can point to root
28829 -		 * cgroup but never be NULL. (and task_struct itself is freed
28830 -		 * by RCU, cgroup itself is RCU safe.) Then, we have small
28831 -		 * risk here to get wrong cgroup. But such kind of mis-account
28832 -		 * by race always happens because we don't have cgroup_mutex().
28833 -		 * It's overkill and we allow that small race, here.
28834 +		 * Because we don't have task_lock(), "p" can exit.
28835 +		 * In that case, "mem" can point to root or p can be NULL with
28836 +		 * race with swapoff. Then, we have a small risk of mis-accounting.
28837 +		 * But such kind of mis-accounting by race always happens because
28838 +		 * we don't have cgroup_mutex(). It's overkill and we allow that
28839 +		 * small race, here.
28840 +		 * (*) swapoff et al. will charge against mm-struct not against
28841 +		 * task-struct. So, mm->owner can be NULL.
28842  		 */
28843  		mem = mem_cgroup_from_task(p);
28844 -		VM_BUG_ON(!mem);
28845 -		if (mem_cgroup_is_root(mem)) {
28846 +		if (!mem || mem_cgroup_is_root(mem)) {
28847  			rcu_read_unlock();
28848  			goto done;
28849  		}
28850 @@ -4445,7 +4446,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
28851  	unsigned long precharge;
28852  	struct vm_area_struct *vma;
28853  
28854 -	down_read(&mm->mmap_sem);
28855 +	/* We've already held the mmap_sem */
28856  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
28857  		struct mm_walk mem_cgroup_count_precharge_walk = {
28858  			.pmd_entry = mem_cgroup_count_precharge_pte_range,
28859 @@ -4457,7 +4458,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
28860  		walk_page_range(vma->vm_start, vma->vm_end,
28861  					&mem_cgroup_count_precharge_walk);
28862  	}
28863 -	up_read(&mm->mmap_sem);
28864  
28865  	precharge = mc.precharge;
28866  	mc.precharge = 0;
28867 @@ -4508,11 +4508,16 @@ static void mem_cgroup_clear_mc(void)
28868  
28869  		mc.moved_swap = 0;
28870  	}
28871 +	if (mc.mm) {
28872 +		up_read(&mc.mm->mmap_sem);
28873 +		mmput(mc.mm);
28874 +	}
28875  	spin_lock(&mc.lock);
28876  	mc.from = NULL;
28877  	mc.to = NULL;
28878 -	mc.moving_task = NULL;
28879  	spin_unlock(&mc.lock);
28880 +	mc.moving_task = NULL;
28881 +	mc.mm = NULL;
28882  	memcg_oom_recover(from);
28883  	memcg_oom_recover(to);
28884  	wake_up_all(&mc.waitq);
28885 @@ -4537,26 +4542,37 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
28886  			return 0;
28887  		/* We move charges only when we move a owner of the mm */
28888  		if (mm->owner == p) {
28889 +			/*
28890 +			 * We do all the move charge works under one mmap_sem to
28891 +			 * avoid deadlock with down_write(&mmap_sem)
28892 +			 * -> try_charge() -> if (mc.moving_task) -> sleep.
28893 +			 */
28894 +			down_read(&mm->mmap_sem);
28895 +
28896  			VM_BUG_ON(mc.from);
28897  			VM_BUG_ON(mc.to);
28898  			VM_BUG_ON(mc.precharge);
28899  			VM_BUG_ON(mc.moved_charge);
28900  			VM_BUG_ON(mc.moved_swap);
28901  			VM_BUG_ON(mc.moving_task);
28902 +			VM_BUG_ON(mc.mm);
28903 +
28904  			spin_lock(&mc.lock);
28905  			mc.from = from;
28906  			mc.to = mem;
28907  			mc.precharge = 0;
28908  			mc.moved_charge = 0;
28909  			mc.moved_swap = 0;
28910 -			mc.moving_task = current;
28911  			spin_unlock(&mc.lock);
28912 +			mc.moving_task = current;
28913 +			mc.mm = mm;
28914  
28915  			ret = mem_cgroup_precharge_mc(mm);
28916  			if (ret)
28917  				mem_cgroup_clear_mc();
28918 -		}
28919 -		mmput(mm);
28920 +			/* We call up_read() and mmput() in clear_mc(). */
28921 +		} else
28922 +			mmput(mm);
28923  	}
28924  	return ret;
28925  }
28926 @@ -4644,7 +4660,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
28927  	struct vm_area_struct *vma;
28928  
28929  	lru_add_drain_all();
28930 -	down_read(&mm->mmap_sem);
28931 +	/* We've already held the mmap_sem */
28932  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
28933  		int ret;
28934  		struct mm_walk mem_cgroup_move_charge_walk = {
28935 @@ -4663,7 +4679,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
28936  			 */
28937  			break;
28938  	}
28939 -	up_read(&mm->mmap_sem);
28940  }
28941  
28942  static void mem_cgroup_move_task(struct cgroup_subsys *ss,
28943 @@ -4672,17 +4687,11 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
28944  				struct task_struct *p,
28945  				bool threadgroup)
28946  {
28947 -	struct mm_struct *mm;
28948 -
28949 -	if (!mc.to)
28950 +	if (!mc.mm)
28951  		/* no need to move charge */
28952  		return;
28953  
28954 -	mm = get_task_mm(p);
28955 -	if (mm) {
28956 -		mem_cgroup_move_charge(mm);
28957 -		mmput(mm);
28958 -	}
28959 +	mem_cgroup_move_charge(mc.mm);
28960  	mem_cgroup_clear_mc();
28961  }
28962  #else	/* !CONFIG_MMU */
28963 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
28964 index dd186c1..6345dfe 100644
28965 --- a/mm/memory_hotplug.c
28966 +++ b/mm/memory_hotplug.c
28967 @@ -659,7 +659,7 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
28968   * Scanning pfn is much easier than scanning lru list.
28969   * Scan pfn from start to end and Find LRU page.
28970   */
28971 -int scan_lru_pages(unsigned long start, unsigned long end)
28972 +unsigned long scan_lru_pages(unsigned long start, unsigned long end)
28973  {
28974  	unsigned long pfn;
28975  	struct page *page;
28976 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
28977 index f969da5..c1002c6 100644
28978 --- a/mm/mempolicy.c
28979 +++ b/mm/mempolicy.c
28980 @@ -1588,7 +1588,7 @@ unsigned slab_node(struct mempolicy *policy)
28981  		(void)first_zones_zonelist(zonelist, highest_zoneidx,
28982  							&policy->v.nodes,
28983  							&zone);
28984 -		return zone->node;
28985 +		return zone ? zone->node : numa_node_id();
28986  	}
28987  
28988  	default:
28989 diff --git a/mm/migrate.c b/mm/migrate.c
28990 index 38e7cad..2cfa9bf 100644
28991 --- a/mm/migrate.c
28992 +++ b/mm/migrate.c
28993 @@ -553,7 +553,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
28994  	int *result = NULL;
28995  	struct page *newpage = get_new_page(page, private, &result);
28996  	int remap_swapcache = 1;
28997 -	int rcu_locked = 0;
28998  	int charge = 0;
28999  	struct mem_cgroup *mem = NULL;
29000  	struct anon_vma *anon_vma = NULL;
29001 @@ -605,20 +604,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
29002  	/*
29003  	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
29004  	 * we cannot notice that anon_vma is freed while we migrates a page.
29005 -	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
29006 +	 * This get_anon_vma() delays freeing anon_vma pointer until the end
29007  	 * of migration. File cache pages are no problem because of page_lock()
29008  	 * File Caches may use write_page() or lock_page() in migration, then,
29009  	 * just care Anon page here.
29010  	 */
29011  	if (PageAnon(page)) {
29012 -		rcu_read_lock();
29013 -		rcu_locked = 1;
29014 -
29015 -		/* Determine how to safely use anon_vma */
29016 -		if (!page_mapped(page)) {
29017 -			if (!PageSwapCache(page))
29018 -				goto rcu_unlock;
29019 -
29020 +		/*
29021 +		 * Only page_lock_anon_vma() understands the subtleties of
29022 +		 * getting a hold on an anon_vma from outside one of its mms.
29023 +		 */
29024 +		anon_vma = page_lock_anon_vma(page);
29025 +		if (anon_vma) {
29026 +			/*
29027 +			 * Take a reference count on the anon_vma if the
29028 +			 * page is mapped so that it is guaranteed to
29029 +			 * exist when the page is remapped later
29030 +			 */
29031 +			get_anon_vma(anon_vma);
29032 +			page_unlock_anon_vma(anon_vma);
29033 +		} else if (PageSwapCache(page)) {
29034  			/*
29035  			 * We cannot be sure that the anon_vma of an unmapped
29036  			 * swapcache page is safe to use because we don't
29037 @@ -633,13 +638,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
29038  			 */
29039  			remap_swapcache = 0;
29040  		} else {
29041 -			/*
29042 -			 * Take a reference count on the anon_vma if the
29043 -			 * page is mapped so that it is guaranteed to
29044 -			 * exist when the page is remapped later
29045 -			 */
29046 -			anon_vma = page_anon_vma(page);
29047 -			get_anon_vma(anon_vma);
29048 +			goto uncharge;
29049  		}
29050  	}
29051  
29052 @@ -656,16 +655,10 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
29053  	 * free the metadata, so the page can be freed.
29054  	 */
29055  	if (!page->mapping) {
29056 -		if (!PageAnon(page) && page_has_private(page)) {
29057 -			/*
29058 -			 * Go direct to try_to_free_buffers() here because
29059 -			 * a) that's what try_to_release_page() would do anyway
29060 -			 * b) we may be under rcu_read_lock() here, so we can't
29061 -			 *    use GFP_KERNEL which is what try_to_release_page()
29062 -			 *    needs to be effective.
29063 -			 */
29064 +		VM_BUG_ON(PageAnon(page));
29065 +		if (page_has_private(page)) {
29066  			try_to_free_buffers(page);
29067 -			goto rcu_unlock;
29068 +			goto uncharge;
29069  		}
29070  		goto skip_unmap;
29071  	}
29072 @@ -679,14 +672,11 @@ skip_unmap:
29073  
29074  	if (rc && remap_swapcache)
29075  		remove_migration_ptes(page, page);
29076 -rcu_unlock:
29077  
29078  	/* Drop an anon_vma reference if we took one */
29079  	if (anon_vma)
29080  		drop_anon_vma(anon_vma);
29081  
29082 -	if (rcu_locked)
29083 -		rcu_read_unlock();
29084  uncharge:
29085  	if (!charge)
29086  		mem_cgroup_end_migration(mem, page, newpage);
29087 diff --git a/mm/mmap.c b/mm/mmap.c
29088 index 00161a4..283a0a8 100644
29089 --- a/mm/mmap.c
29090 +++ b/mm/mmap.c
29091 @@ -2460,6 +2460,7 @@ int install_special_mapping(struct mm_struct *mm,
29092  			    unsigned long addr, unsigned long len,
29093  			    unsigned long vm_flags, struct page **pages)
29094  {
29095 +	int ret;
29096  	struct vm_area_struct *vma;
29097  
29098  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
29099 @@ -2477,16 +2478,23 @@ int install_special_mapping(struct mm_struct *mm,
29100  	vma->vm_ops = &special_mapping_vmops;
29101  	vma->vm_private_data = pages;
29102  
29103 -	if (unlikely(insert_vm_struct(mm, vma))) {
29104 -		kmem_cache_free(vm_area_cachep, vma);
29105 -		return -ENOMEM;
29106 -	}
29107 +	ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
29108 +	if (ret)
29109 +		goto out;
29110 +
29111 +	ret = insert_vm_struct(mm, vma);
29112 +	if (ret)
29113 +		goto out;
29114  
29115  	mm->total_vm += len >> PAGE_SHIFT;
29116  
29117  	perf_event_mmap(vma);
29118  
29119  	return 0;
29120 +
29121 +out:
29122 +	kmem_cache_free(vm_area_cachep, vma);
29123 +	return ret;
29124  }
29125  
29126  static DEFINE_MUTEX(mm_all_locks_mutex);
29127 diff --git a/mm/mmzone.c b/mm/mmzone.c
29128 index e35bfb8..f5b7d17 100644
29129 --- a/mm/mmzone.c
29130 +++ b/mm/mmzone.c
29131 @@ -87,24 +87,3 @@ int memmap_valid_within(unsigned long pfn,
29132  	return 1;
29133  }
29134  #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
29135 -
29136 -#ifdef CONFIG_SMP
29137 -/* Called when a more accurate view of NR_FREE_PAGES is needed */
29138 -unsigned long zone_nr_free_pages(struct zone *zone)
29139 -{
29140 -	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
29141 -
29142 -	/*
29143 -	 * While kswapd is awake, it is considered the zone is under some
29144 -	 * memory pressure. Under pressure, there is a risk that
29145 -	 * per-cpu-counter-drift will allow the min watermark to be breached
29146 -	 * potentially causing a live-lock. While kswapd is awake and
29147 -	 * free pages are low, get a better estimate for free pages
29148 -	 */
29149 -	if (nr_free_pages < zone->percpu_drift_mark &&
29150 -			!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
29151 -		return zone_page_state_snapshot(zone, NR_FREE_PAGES);
29152 -
29153 -	return nr_free_pages;
29154 -}
29155 -#endif /* CONFIG_SMP */
29156 diff --git a/mm/mprotect.c b/mm/mprotect.c
29157 index 2d1bf7c..4c51338 100644
29158 --- a/mm/mprotect.c
29159 +++ b/mm/mprotect.c
29160 @@ -211,6 +211,7 @@ success:
29161  	mmu_notifier_invalidate_range_end(mm, start, end);
29162  	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
29163  	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
29164 +	perf_event_mmap(vma);
29165  	return 0;
29166  
29167  fail:
29168 @@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
29169  		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
29170  		if (error)
29171  			goto out;
29172 -		perf_event_mmap(vma);
29173  		nstart = tmp;
29174  
29175  		if (nstart < prev->vm_end)
29176 diff --git a/mm/nommu.c b/mm/nommu.c
29177 index 88ff091..acb3bd3 100644
29178 --- a/mm/nommu.c
29179 +++ b/mm/nommu.c
29180 @@ -1668,6 +1668,7 @@ void exit_mmap(struct mm_struct *mm)
29181  		mm->mmap = vma->vm_next;
29182  		delete_vma_from_mm(vma);
29183  		delete_vma(mm, vma);
29184 +		cond_resched();
29185  	}
29186  
29187  	kleave("");
29188 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
29189 index f12ad18..985e072 100644
29190 --- a/mm/page_alloc.c
29191 +++ b/mm/page_alloc.c
29192 @@ -103,19 +103,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
29193   * only be modified with pm_mutex held, unless the suspend/hibernate code is
29194   * guaranteed not to run in parallel with that modification).
29195   */
29196 -void set_gfp_allowed_mask(gfp_t mask)
29197 +
29198 +static gfp_t saved_gfp_mask;
29199 +
29200 +void pm_restore_gfp_mask(void)
29201  {
29202  	WARN_ON(!mutex_is_locked(&pm_mutex));
29203 -	gfp_allowed_mask = mask;
29204 +	if (saved_gfp_mask) {
29205 +		gfp_allowed_mask = saved_gfp_mask;
29206 +		saved_gfp_mask = 0;
29207 +	}
29208  }
29209  
29210 -gfp_t clear_gfp_allowed_mask(gfp_t mask)
29211 +void pm_restrict_gfp_mask(void)
29212  {
29213 -	gfp_t ret = gfp_allowed_mask;
29214 -
29215  	WARN_ON(!mutex_is_locked(&pm_mutex));
29216 -	gfp_allowed_mask &= ~mask;
29217 -	return ret;
29218 +	WARN_ON(saved_gfp_mask);
29219 +	saved_gfp_mask = gfp_allowed_mask;
29220 +	gfp_allowed_mask &= ~GFP_IOFS;
29221  }
29222  #endif /* CONFIG_PM_SLEEP */
29223  
29224 @@ -530,7 +535,7 @@ static inline void __free_one_page(struct page *page,
29225  	 * so it's less likely to be used soon and more likely to be merged
29226  	 * as a higher order page
29227  	 */
29228 -	if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
29229 +	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
29230  		struct page *higher_page, *higher_buddy;
29231  		combined_idx = __find_combined_index(page_idx, order);
29232  		higher_page = page + combined_idx - page_idx;
29233 @@ -1454,24 +1459,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
29234  #endif /* CONFIG_FAIL_PAGE_ALLOC */
29235  
29236  /*
29237 - * Return 1 if free pages are above 'mark'. This takes into account the order
29238 + * Return true if free pages are above 'mark'. This takes into account the order
29239   * of the allocation.
29240   */
29241 -int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
29242 -		      int classzone_idx, int alloc_flags)
29243 +static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
29244 +		      int classzone_idx, int alloc_flags, long free_pages)
29245  {
29246  	/* free_pages my go negative - that's OK */
29247  	long min = mark;
29248 -	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
29249  	int o;
29250  
29251 +	free_pages -= (1 << order) + 1;
29252  	if (alloc_flags & ALLOC_HIGH)
29253  		min -= min / 2;
29254  	if (alloc_flags & ALLOC_HARDER)
29255  		min -= min / 4;
29256  
29257  	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
29258 -		return 0;
29259 +		return false;
29260  	for (o = 0; o < order; o++) {
29261  		/* At the next order, this order's pages become unavailable */
29262  		free_pages -= z->free_area[o].nr_free << o;
29263 @@ -1480,9 +1485,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
29264  		min >>= 1;
29265  
29266  		if (free_pages <= min)
29267 -			return 0;
29268 +			return false;
29269  	}
29270 -	return 1;
29271 +	return true;
29272 +}
29273 +
29274 +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
29275 +		      int classzone_idx, int alloc_flags)
29276 +{
29277 +	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
29278 +					zone_page_state(z, NR_FREE_PAGES));
29279 +}
29280 +
29281 +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
29282 +		      int classzone_idx, int alloc_flags)
29283 +{
29284 +	long free_pages = zone_page_state(z, NR_FREE_PAGES);
29285 +
29286 +	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
29287 +		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
29288 +
29289 +	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
29290 +								free_pages);
29291  }
29292  
29293  #ifdef CONFIG_NUMA
29294 @@ -2436,7 +2460,7 @@ void show_free_areas(void)
29295  			" all_unreclaimable? %s"
29296  			"\n",
29297  			zone->name,
29298 -			K(zone_nr_free_pages(zone)),
29299 +			K(zone_page_state(zone, NR_FREE_PAGES)),
29300  			K(min_wmark_pages(zone)),
29301  			K(low_wmark_pages(zone)),
29302  			K(high_wmark_pages(zone)),
29303 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
29304 index 6b8889d..d8087f0 100644
29305 --- a/mm/vmalloc.c
29306 +++ b/mm/vmalloc.c
29307 @@ -517,6 +517,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
29308  static void purge_fragmented_blocks_allcpus(void);
29309  
29310  /*
29311 + * called before a call to iounmap() if the caller wants vm_area_struct's
29312 + * immediately freed.
29313 + */
29314 +void set_iounmap_nonlazy(void)
29315 +{
29316 +	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
29317 +}
29318 +
29319 +/*
29320   * Purges all lazily-freed vmap areas.
29321   *
29322   * If sync is 0 then don't purge if there is already a purge in progress.
29323 diff --git a/mm/vmscan.c b/mm/vmscan.c
29324 index c5dfabf..3e71cb1 100644
29325 --- a/mm/vmscan.c
29326 +++ b/mm/vmscan.c
29327 @@ -2082,7 +2082,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
29328  		if (zone->all_unreclaimable)
29329  			continue;
29330  
29331 -		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
29332 +		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
29333  								0, 0))
29334  			return 1;
29335  	}
29336 @@ -2169,7 +2169,7 @@ loop_again:
29337  				shrink_active_list(SWAP_CLUSTER_MAX, zone,
29338  							&sc, priority, 0);
29339  
29340 -			if (!zone_watermark_ok(zone, order,
29341 +			if (!zone_watermark_ok_safe(zone, order,
29342  					high_wmark_pages(zone), 0, 0)) {
29343  				end_zone = i;
29344  				break;
29345 @@ -2215,7 +2215,7 @@ loop_again:
29346  			 * We put equal pressure on every zone, unless one
29347  			 * zone has way too many pages free already.
29348  			 */
29349 -			if (!zone_watermark_ok(zone, order,
29350 +			if (!zone_watermark_ok_safe(zone, order,
29351  					8*high_wmark_pages(zone), end_zone, 0))
29352  				shrink_zone(priority, zone, &sc);
29353  			reclaim_state->reclaimed_slab = 0;
29354 @@ -2236,7 +2236,7 @@ loop_again:
29355  			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
29356  				sc.may_writepage = 1;
29357  
29358 -			if (!zone_watermark_ok(zone, order,
29359 +			if (!zone_watermark_ok_safe(zone, order,
29360  					high_wmark_pages(zone), end_zone, 0)) {
29361  				all_zones_ok = 0;
29362  				/*
29363 @@ -2244,7 +2244,7 @@ loop_again:
29364  				 * means that we have a GFP_ATOMIC allocation
29365  				 * failure risk. Hurry up!
29366  				 */
29367 -				if (!zone_watermark_ok(zone, order,
29368 +				if (!zone_watermark_ok_safe(zone, order,
29369  					    min_wmark_pages(zone), end_zone, 0))
29370  					has_under_min_watermark_zone = 1;
29371  			}
29372 @@ -2378,7 +2378,9 @@ static int kswapd(void *p)
29373  				 */
29374  				if (!sleeping_prematurely(pgdat, order, remaining)) {
29375  					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
29376 +					restore_pgdat_percpu_threshold(pgdat);
29377  					schedule();
29378 +					reduce_pgdat_percpu_threshold(pgdat);
29379  				} else {
29380  					if (remaining)
29381  						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
29382 @@ -2417,16 +2419,17 @@ void wakeup_kswapd(struct zone *zone, int order)
29383  	if (!populated_zone(zone))
29384  		return;
29385  
29386 -	pgdat = zone->zone_pgdat;
29387 -	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
29388 +	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
29389  		return;
29390 +	pgdat = zone->zone_pgdat;
29391  	if (pgdat->kswapd_max_order < order)
29392  		pgdat->kswapd_max_order = order;
29393 -	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
29394 -	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
29395 -		return;
29396  	if (!waitqueue_active(&pgdat->kswapd_wait))
29397  		return;
29398 +	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
29399 +		return;
29400 +
29401 +	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
29402  	wake_up_interruptible(&pgdat->kswapd_wait);
29403  }
29404  
29405 diff --git a/mm/vmstat.c b/mm/vmstat.c
29406 index 355a9e6..4d7faeb 100644
29407 --- a/mm/vmstat.c
29408 +++ b/mm/vmstat.c
29409 @@ -81,6 +81,30 @@ EXPORT_SYMBOL(vm_stat);
29410  
29411  #ifdef CONFIG_SMP
29412  
29413 +static int calculate_pressure_threshold(struct zone *zone)
29414 +{
29415 +	int threshold;
29416 +	int watermark_distance;
29417 +
29418 +	/*
29419 +	 * As vmstats are not up to date, there is drift between the estimated
29420 +	 * and real values. For high thresholds and a high number of CPUs, it
29421 +	 * is possible for the min watermark to be breached while the estimated
29422 +	 * value looks fine. The pressure threshold is a reduced value such
29423 +	 * that even the maximum amount of drift will not accidentally breach
29424 +	 * the min watermark
29425 +	 */
29426 +	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
29427 +	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
29428 +
29429 +	/*
29430 +	 * Maximum threshold is 125
29431 +	 */
29432 +	threshold = min(125, threshold);
29433 +
29434 +	return threshold;
29435 +}
29436 +
29437  static int calculate_threshold(struct zone *zone)
29438  {
29439  	int threshold;
29440 @@ -159,6 +183,48 @@ static void refresh_zone_stat_thresholds(void)
29441  	}
29442  }
29443  
29444 +void reduce_pgdat_percpu_threshold(pg_data_t *pgdat)
29445 +{
29446 +	struct zone *zone;
29447 +	int cpu;
29448 +	int threshold;
29449 +	int i;
29450 +
29451 +	get_online_cpus();
29452 +	for (i = 0; i < pgdat->nr_zones; i++) {
29453 +		zone = &pgdat->node_zones[i];
29454 +		if (!zone->percpu_drift_mark)
29455 +			continue;
29456 +
29457 +		threshold = calculate_pressure_threshold(zone);
29458 +		for_each_online_cpu(cpu)
29459 +			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
29460 +							= threshold;
29461 +	}
29462 +	put_online_cpus();
29463 +}
29464 +
29465 +void restore_pgdat_percpu_threshold(pg_data_t *pgdat)
29466 +{
29467 +	struct zone *zone;
29468 +	int cpu;
29469 +	int threshold;
29470 +	int i;
29471 +
29472 +	get_online_cpus();
29473 +	for (i = 0; i < pgdat->nr_zones; i++) {
29474 +		zone = &pgdat->node_zones[i];
29475 +		if (!zone->percpu_drift_mark)
29476 +			continue;
29477 +
29478 +		threshold = calculate_threshold(zone);
29479 +		for_each_online_cpu(cpu)
29480 +			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
29481 +							= threshold;
29482 +	}
29483 +	put_online_cpus();
29484 +}
29485 +
29486  /*
29487   * For use when we know that interrupts are disabled.
29488   */
29489 @@ -826,7 +892,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
29490  		   "\n        scanned  %lu"
29491  		   "\n        spanned  %lu"
29492  		   "\n        present  %lu",
29493 -		   zone_nr_free_pages(zone),
29494 +		   zone_page_state(zone, NR_FREE_PAGES),
29495  		   min_wmark_pages(zone),
29496  		   low_wmark_pages(zone),
29497  		   high_wmark_pages(zone),
29498 diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
29499 index 0eb96f7..2dcff0b 100644
29500 --- a/net/8021q/vlan_core.c
29501 +++ b/net/8021q/vlan_core.c
29502 @@ -43,6 +43,9 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
29503  	struct net_device *dev = skb->dev;
29504  	struct vlan_rx_stats     *rx_stats;
29505  
29506 +	if (unlikely(!is_vlan_dev(dev)))
29507 +		return 0;
29508 +
29509  	skb->dev = vlan_dev_info(dev)->real_dev;
29510  	netif_nit_deliver(skb);
29511  
29512 diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
29513 index cfdfd7e..6e2371a 100644
29514 --- a/net/ax25/af_ax25.c
29515 +++ b/net/ax25/af_ax25.c
29516 @@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
29517  	ax25_cb *ax25;
29518  	int err = 0;
29519  
29520 +	memset(fsa, 0, sizeof(fsa));
29521  	lock_sock(sk);
29522  	ax25 = ax25_sk(sk);
29523  
29524 @@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
29525  
29526  		fsa->fsa_ax25.sax25_family = AF_AX25;
29527  		fsa->fsa_ax25.sax25_call   = ax25->dest_addr;
29528 -		fsa->fsa_ax25.sax25_ndigis = 0;
29529  
29530  		if (ax25->digipeat != NULL) {
29531  			ndigi = ax25->digipeat->ndigi;
29532 diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
29533 index 0b54b7d..dc60205 100644
29534 --- a/net/bluetooth/l2cap.c
29535 +++ b/net/bluetooth/l2cap.c
29536 @@ -2891,7 +2891,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
29537  	struct l2cap_chan_list *list = &conn->chan_list;
29538  	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
29539  	struct l2cap_conn_rsp rsp;
29540 -	struct sock *parent, *uninitialized_var(sk);
29541 +	struct sock *parent, *sk = NULL;
29542  	int result, status = L2CAP_CS_NO_INFO;
29543  
29544  	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
29545 @@ -3000,7 +3000,7 @@ sendresp:
29546  					L2CAP_INFO_REQ, sizeof(info), &info);
29547  	}
29548  
29549 -	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
29550 +	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
29551  				result == L2CAP_CR_SUCCESS) {
29552  		u8 buf[128];
29553  		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
29554 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
29555 index eb5b256..f19e347 100644
29556 --- a/net/bridge/br_multicast.c
29557 +++ b/net/bridge/br_multicast.c
29558 @@ -437,7 +437,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
29559  	ip6h = ipv6_hdr(skb);
29560  
29561  	*(__force __be32 *)ip6h = htonl(0x60000000);
29562 -	ip6h->payload_len = 8 + sizeof(*mldq);
29563 +	ip6h->payload_len = htons(8 + sizeof(*mldq));
29564  	ip6h->nexthdr = IPPROTO_HOPOPTS;
29565  	ip6h->hop_limit = 1;
29566  	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
29567 diff --git a/net/can/bcm.c b/net/can/bcm.c
29568 index 08ffe9e..6faa825 100644
29569 --- a/net/can/bcm.c
29570 +++ b/net/can/bcm.c
29571 @@ -125,7 +125,7 @@ struct bcm_sock {
29572  	struct list_head tx_ops;
29573  	unsigned long dropped_usr_msgs;
29574  	struct proc_dir_entry *bcm_proc_read;
29575 -	char procname [9]; /* pointer printed in ASCII with \0 */
29576 +	char procname [20]; /* pointer printed in ASCII with \0 */
29577  };
29578  
29579  static inline struct bcm_sock *bcm_sk(const struct sock *sk)
29580 diff --git a/net/compat.c b/net/compat.c
29581 index 63d260e..3649d58 100644
29582 --- a/net/compat.c
29583 +++ b/net/compat.c
29584 @@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
29585  		compat_size_t len;
29586  
29587  		if (get_user(len, &uiov32->iov_len) ||
29588 -		   get_user(buf, &uiov32->iov_base)) {
29589 -			tot_len = -EFAULT;
29590 -			break;
29591 -		}
29592 +		    get_user(buf, &uiov32->iov_base))
29593 +			return -EFAULT;
29594 +
29595 +		if (len > INT_MAX - tot_len)
29596 +			len = INT_MAX - tot_len;
29597 +
29598  		tot_len += len;
29599  		kiov->iov_base = compat_ptr(buf);
29600  		kiov->iov_len = (__kernel_size_t) len;
29601 diff --git a/net/core/dev.c b/net/core/dev.c
29602 index 660dd41..1dad6c0 100644
29603 --- a/net/core/dev.c
29604 +++ b/net/core/dev.c
29605 @@ -1648,10 +1648,10 @@ EXPORT_SYMBOL(netif_device_attach);
29606  
29607  static bool can_checksum_protocol(unsigned long features, __be16 protocol)
29608  {
29609 -	return ((features & NETIF_F_GEN_CSUM) ||
29610 -		((features & NETIF_F_IP_CSUM) &&
29611 +	return ((features & NETIF_F_NO_CSUM) ||
29612 +		((features & NETIF_F_V4_CSUM) &&
29613  		 protocol == htons(ETH_P_IP)) ||
29614 -		((features & NETIF_F_IPV6_CSUM) &&
29615 +		((features & NETIF_F_V6_CSUM) &&
29616  		 protocol == htons(ETH_P_IPV6)) ||
29617  		((features & NETIF_F_FCOE_CRC) &&
29618  		 protocol == htons(ETH_P_FCOE)));
29619 @@ -2891,6 +2891,15 @@ static int __netif_receive_skb(struct sk_buff *skb)
29620  ncls:
29621  #endif
29622  
29623 +	/* If we got this far with a hardware accelerated VLAN tag, it means
29624 +	 * that we were put in promiscuous mode but nobody is interested in
29625 +	 * this vid. Drop the packet now to prevent it from getting propagated
29626 +	 * to other parts of the stack that won't know how to deal with packets
29627 +	 * tagged in this manner.
29628 +	 */
29629 +	if (unlikely(vlan_tx_tag_present(skb)))
29630 +		goto bypass;
29631 +
29632  	/* Handle special case of bridge or macvlan */
29633  	rx_handler = rcu_dereference(skb->dev->rx_handler);
29634  	if (rx_handler) {
29635 @@ -2927,6 +2936,7 @@ ncls:
29636  		}
29637  	}
29638  
29639 +bypass:
29640  	if (pt_prev) {
29641  		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
29642  	} else {
29643 diff --git a/net/core/dst.c b/net/core/dst.c
29644 index 6c41b1f..2844639 100644
29645 --- a/net/core/dst.c
29646 +++ b/net/core/dst.c
29647 @@ -343,6 +343,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
29648  
29649  static struct notifier_block dst_dev_notifier = {
29650  	.notifier_call	= dst_dev_event,
29651 +	.priority = -10, /* must be called after other network notifiers */
29652  };
29653  
29654  void __init dst_init(void)
29655 diff --git a/net/core/filter.c b/net/core/filter.c
29656 index 52b051f..71a433c 100644
29657 --- a/net/core/filter.c
29658 +++ b/net/core/filter.c
29659 @@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
29660   */
29661  unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
29662  {
29663 -	struct sock_filter *fentry;	/* We walk down these */
29664  	void *ptr;
29665  	u32 A = 0;			/* Accumulator */
29666  	u32 X = 0;			/* Index Register */
29667  	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
29668 +	unsigned long memvalid = 0;
29669  	u32 tmp;
29670  	int k;
29671  	int pc;
29672  
29673 +	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
29674  	/*
29675  	 * Process array of filter instructions.
29676  	 */
29677  	for (pc = 0; pc < flen; pc++) {
29678 -		fentry = &filter[pc];
29679 +		const struct sock_filter *fentry = &filter[pc];
29680 +		u32 f_k = fentry->k;
29681  
29682  		switch (fentry->code) {
29683  		case BPF_S_ALU_ADD_X:
29684  			A += X;
29685  			continue;
29686  		case BPF_S_ALU_ADD_K:
29687 -			A += fentry->k;
29688 +			A += f_k;
29689  			continue;
29690  		case BPF_S_ALU_SUB_X:
29691  			A -= X;
29692  			continue;
29693  		case BPF_S_ALU_SUB_K:
29694 -			A -= fentry->k;
29695 +			A -= f_k;
29696  			continue;
29697  		case BPF_S_ALU_MUL_X:
29698  			A *= X;
29699  			continue;
29700  		case BPF_S_ALU_MUL_K:
29701 -			A *= fentry->k;
29702 +			A *= f_k;
29703  			continue;
29704  		case BPF_S_ALU_DIV_X:
29705  			if (X == 0)
29706 @@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
29707  			A /= X;
29708  			continue;
29709  		case BPF_S_ALU_DIV_K:
29710 -			A /= fentry->k;
29711 +			A /= f_k;
29712  			continue;
29713  		case BPF_S_ALU_AND_X:
29714  			A &= X;
29715  			continue;
29716  		case BPF_S_ALU_AND_K:
29717 -			A &= fentry->k;
29718 +			A &= f_k;
29719  			continue;
29720  		case BPF_S_ALU_OR_X:
29721  			A |= X;
29722  			continue;
29723  		case BPF_S_ALU_OR_K:
29724 -			A |= fentry->k;
29725 +			A |= f_k;
29726  			continue;
29727  		case BPF_S_ALU_LSH_X:
29728  			A <<= X;
29729  			continue;
29730  		case BPF_S_ALU_LSH_K:
29731 -			A <<= fentry->k;
29732 +			A <<= f_k;
29733  			continue;
29734  		case BPF_S_ALU_RSH_X:
29735  			A >>= X;
29736  			continue;
29737  		case BPF_S_ALU_RSH_K:
29738 -			A >>= fentry->k;
29739 +			A >>= f_k;
29740  			continue;
29741  		case BPF_S_ALU_NEG:
29742  			A = -A;
29743  			continue;
29744  		case BPF_S_JMP_JA:
29745 -			pc += fentry->k;
29746 +			pc += f_k;
29747  			continue;
29748  		case BPF_S_JMP_JGT_K:
29749 -			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
29750 +			pc += (A > f_k) ? fentry->jt : fentry->jf;
29751  			continue;
29752  		case BPF_S_JMP_JGE_K:
29753 -			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
29754 +			pc += (A >= f_k) ? fentry->jt : fentry->jf;
29755  			continue;
29756  		case BPF_S_JMP_JEQ_K:
29757 -			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
29758 +			pc += (A == f_k) ? fentry->jt : fentry->jf;
29759  			continue;
29760  		case BPF_S_JMP_JSET_K:
29761 -			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
29762 +			pc += (A & f_k) ? fentry->jt : fentry->jf;
29763  			continue;
29764  		case BPF_S_JMP_JGT_X:
29765  			pc += (A > X) ? fentry->jt : fentry->jf;
29766 @@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
29767  			pc += (A & X) ? fentry->jt : fentry->jf;
29768  			continue;
29769  		case BPF_S_LD_W_ABS:
29770 -			k = fentry->k;
29771 +			k = f_k;
29772  load_w:
29773  			ptr = load_pointer(skb, k, 4, &tmp);
29774  			if (ptr != NULL) {
29775 @@ -218,7 +220,7 @@ load_w:
29776  			}
29777  			break;
29778  		case BPF_S_LD_H_ABS:
29779 -			k = fentry->k;
29780 +			k = f_k;
29781  load_h:
29782  			ptr = load_pointer(skb, k, 2, &tmp);
29783  			if (ptr != NULL) {
29784 @@ -227,7 +229,7 @@ load_h:
29785  			}
29786  			break;
29787  		case BPF_S_LD_B_ABS:
29788 -			k = fentry->k;
29789 +			k = f_k;
29790  load_b:
29791  			ptr = load_pointer(skb, k, 1, &tmp);
29792  			if (ptr != NULL) {
29793 @@ -242,32 +244,34 @@ load_b:
29794  			X = skb->len;
29795  			continue;
29796  		case BPF_S_LD_W_IND:
29797 -			k = X + fentry->k;
29798 +			k = X + f_k;
29799  			goto load_w;
29800  		case BPF_S_LD_H_IND:
29801 -			k = X + fentry->k;
29802 +			k = X + f_k;
29803  			goto load_h;
29804  		case BPF_S_LD_B_IND:
29805 -			k = X + fentry->k;
29806 +			k = X + f_k;
29807  			goto load_b;
29808  		case BPF_S_LDX_B_MSH:
29809 -			ptr = load_pointer(skb, fentry->k, 1, &tmp);
29810 +			ptr = load_pointer(skb, f_k, 1, &tmp);
29811  			if (ptr != NULL) {
29812  				X = (*(u8 *)ptr & 0xf) << 2;
29813  				continue;
29814  			}
29815  			return 0;
29816  		case BPF_S_LD_IMM:
29817 -			A = fentry->k;
29818 +			A = f_k;
29819  			continue;
29820  		case BPF_S_LDX_IMM:
29821 -			X = fentry->k;
29822 +			X = f_k;
29823  			continue;
29824  		case BPF_S_LD_MEM:
29825 -			A = mem[fentry->k];
29826 +			A = (memvalid & (1UL << f_k)) ?
29827 +				mem[f_k] : 0;
29828  			continue;
29829  		case BPF_S_LDX_MEM:
29830 -			X = mem[fentry->k];
29831 +			X = (memvalid & (1UL << f_k)) ?
29832 +				mem[f_k] : 0;
29833  			continue;
29834  		case BPF_S_MISC_TAX:
29835  			X = A;
29836 @@ -276,14 +280,16 @@ load_b:
29837  			A = X;
29838  			continue;
29839  		case BPF_S_RET_K:
29840 -			return fentry->k;
29841 +			return f_k;
29842  		case BPF_S_RET_A:
29843  			return A;
29844  		case BPF_S_ST:
29845 -			mem[fentry->k] = A;
29846 +			memvalid |= 1UL << f_k;
29847 +			mem[f_k] = A;
29848  			continue;
29849  		case BPF_S_STX:
29850 -			mem[fentry->k] = X;
29851 +			memvalid |= 1UL << f_k;
29852 +			mem[f_k] = X;
29853  			continue;
29854  		default:
29855  			WARN_ON(1);
29856 @@ -583,23 +589,16 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
29857  EXPORT_SYMBOL(sk_chk_filter);
29858  
29859  /**
29860 - * 	sk_filter_rcu_release: Release a socket filter by rcu_head
29861 + * 	sk_filter_release_rcu - Release a socket filter by rcu_head
29862   *	@rcu: rcu_head that contains the sk_filter to free
29863   */
29864 -static void sk_filter_rcu_release(struct rcu_head *rcu)
29865 +void sk_filter_release_rcu(struct rcu_head *rcu)
29866  {
29867  	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
29868  
29869 -	sk_filter_release(fp);
29870 -}
29871 -
29872 -static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
29873 -{
29874 -	unsigned int size = sk_filter_len(fp);
29875 -
29876 -	atomic_sub(size, &sk->sk_omem_alloc);
29877 -	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
29878 +	kfree(fp);
29879  }
29880 +EXPORT_SYMBOL(sk_filter_release_rcu);
29881  
29882  /**
29883   *	sk_attach_filter - attach a socket filter
29884 @@ -644,7 +643,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
29885  	rcu_read_unlock_bh();
29886  
29887  	if (old_fp)
29888 -		sk_filter_delayed_uncharge(sk, old_fp);
29889 +		sk_filter_uncharge(sk, old_fp);
29890  	return 0;
29891  }
29892  EXPORT_SYMBOL_GPL(sk_attach_filter);
29893 @@ -658,7 +657,7 @@ int sk_detach_filter(struct sock *sk)
29894  	filter = rcu_dereference_bh(sk->sk_filter);
29895  	if (filter) {
29896  		rcu_assign_pointer(sk->sk_filter, NULL);
29897 -		sk_filter_delayed_uncharge(sk, filter);
29898 +		sk_filter_uncharge(sk, filter);
29899  		ret = 0;
29900  	}
29901  	rcu_read_unlock_bh();
29902 diff --git a/net/core/iovec.c b/net/core/iovec.c
29903 index e6b133b..58eb999 100644
29904 --- a/net/core/iovec.c
29905 +++ b/net/core/iovec.c
29906 @@ -35,10 +35,9 @@
29907   *	in any case.
29908   */
29909  
29910 -long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
29911 +int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
29912  {
29913 -	int size, ct;
29914 -	long err;
29915 +	int size, ct, err;
29916  
29917  	if (m->msg_namelen) {
29918  		if (mode == VERIFY_READ) {
29919 @@ -60,14 +59,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
29920  	err = 0;
29921  
29922  	for (ct = 0; ct < m->msg_iovlen; ct++) {
29923 -		err += iov[ct].iov_len;
29924 -		/*
29925 -		 * Goal is not to verify user data, but to prevent returning
29926 -		 * negative value, which is interpreted as errno.
29927 -		 * Overflow is still possible, but it is harmless.
29928 -		 */
29929 -		if (err < 0)
29930 -			return -EMSGSIZE;
29931 +		size_t len = iov[ct].iov_len;
29932 +
29933 +		if (len > INT_MAX - err) {
29934 +			len = INT_MAX - err;
29935 +			iov[ct].iov_len = len;
29936 +		}
29937 +		err += len;
29938  	}
29939  
29940  	return err;
29941 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
29942 index f78d821..29d7bce 100644
29943 --- a/net/core/rtnetlink.c
29944 +++ b/net/core/rtnetlink.c
29945 @@ -1546,6 +1546,9 @@ replay:
29946  			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
29947  
29948  		dest_net = rtnl_link_get_net(net, tb);
29949 +		if (IS_ERR(dest_net))
29950 +			return PTR_ERR(dest_net);
29951 +
29952  		dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
29953  
29954  		if (IS_ERR(dev))
29955 diff --git a/net/core/timestamping.c b/net/core/timestamping.c
29956 index 0ae6c22..c19bb4e 100644
29957 --- a/net/core/timestamping.c
29958 +++ b/net/core/timestamping.c
29959 @@ -96,11 +96,13 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
29960  	struct phy_device *phydev;
29961  	unsigned int type;
29962  
29963 -	skb_push(skb, ETH_HLEN);
29964 +	if (skb_headroom(skb) < ETH_HLEN)
29965 +		return false;
29966 +	__skb_push(skb, ETH_HLEN);
29967  
29968  	type = classify(skb);
29969  
29970 -	skb_pull(skb, ETH_HLEN);
29971 +	__skb_pull(skb, ETH_HLEN);
29972  
29973  	switch (type) {
29974  	case PTP_CLASS_V1_IPV4:
29975 diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
29976 index d6b93d1..cf38f52 100644
29977 --- a/net/decnet/af_decnet.c
29978 +++ b/net/decnet/af_decnet.c
29979 @@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
29980  			if (r_len > sizeof(struct linkinfo_dn))
29981  				r_len = sizeof(struct linkinfo_dn);
29982  
29983 +			memset(&link, 0, sizeof(link));
29984 +
29985  			switch(sock->state) {
29986  				case SS_CONNECTING:
29987  					link.idn_linkstate = LL_CONNECTING;
29988 diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
29989 index dc54bd0..172a6a9 100644
29990 --- a/net/econet/af_econet.c
29991 +++ b/net/econet/af_econet.c
29992 @@ -31,6 +31,7 @@
29993  #include <linux/skbuff.h>
29994  #include <linux/udp.h>
29995  #include <linux/slab.h>
29996 +#include <linux/vmalloc.h>
29997  #include <net/sock.h>
29998  #include <net/inet_common.h>
29999  #include <linux/stat.h>
30000 @@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30001  #endif
30002  #ifdef CONFIG_ECONET_AUNUDP
30003  	struct msghdr udpmsg;
30004 -	struct iovec iov[msg->msg_iovlen+1];
30005 +	struct iovec iov[2];
30006  	struct aunhdr ah;
30007  	struct sockaddr_in udpdest;
30008  	__kernel_size_t size;
30009 -	int i;
30010  	mm_segment_t oldfs;
30011 +	char *userbuf;
30012  #endif
30013  
30014  	/*
30015 @@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30016  
30017  	mutex_lock(&econet_mutex);
30018  
30019 -	if (saddr == NULL) {
30020 -		struct econet_sock *eo = ec_sk(sk);
30021 -
30022 -		addr.station = eo->station;
30023 -		addr.net     = eo->net;
30024 -		port	     = eo->port;
30025 -		cb	     = eo->cb;
30026 -	} else {
30027 -		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
30028 -			mutex_unlock(&econet_mutex);
30029 -			return -EINVAL;
30030 -		}
30031 -		addr.station = saddr->addr.station;
30032 -		addr.net = saddr->addr.net;
30033 -		port = saddr->port;
30034 -		cb = saddr->cb;
30035 -	}
30036 +        if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
30037 +                mutex_unlock(&econet_mutex);
30038 +                return -EINVAL;
30039 +        }
30040 +        addr.station = saddr->addr.station;
30041 +        addr.net = saddr->addr.net;
30042 +        port = saddr->port;
30043 +        cb = saddr->cb;
30044  
30045  	/* Look for a device with the right network number. */
30046  	dev = net2dev_map[addr.net];
30047 @@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30048  		}
30049  	}
30050  
30051 -	if (len + 15 > dev->mtu) {
30052 -		mutex_unlock(&econet_mutex);
30053 -		return -EMSGSIZE;
30054 -	}
30055 -
30056  	if (dev->type == ARPHRD_ECONET) {
30057  		/* Real hardware Econet.  We're not worthy etc. */
30058  #ifdef CONFIG_ECONET_NATIVE
30059  		unsigned short proto = 0;
30060  		int res;
30061  
30062 +		if (len + 15 > dev->mtu) {
30063 +			mutex_unlock(&econet_mutex);
30064 +			return -EMSGSIZE;
30065 +		}
30066 +
30067  		dev_hold(dev);
30068  
30069  		skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
30070 @@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30071  
30072  		eb = (struct ec_cb *)&skb->cb;
30073  
30074 -		/* BUG: saddr may be NULL */
30075  		eb->cookie = saddr->cookie;
30076  		eb->sec = *saddr;
30077  		eb->sent = ec_tx_done;
30078 @@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30079  		return -ENETDOWN;		/* No socket - can't send */
30080  	}
30081  
30082 +	if (len > 32768) {
30083 +		err = -E2BIG;
30084 +		goto error;
30085 +	}
30086 +
30087  	/* Make up a UDP datagram and hand it off to some higher intellect. */
30088  
30089  	memset(&udpdest, 0, sizeof(udpdest));
30090 @@ -446,36 +442,26 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30091  
30092  	/* tack our header on the front of the iovec */
30093  	size = sizeof(struct aunhdr);
30094 -	/*
30095 -	 * XXX: that is b0rken.  We can't mix userland and kernel pointers
30096 -	 * in iovec, since on a lot of platforms copy_from_user() will
30097 -	 * *not* work with the kernel and userland ones at the same time,
30098 -	 * regardless of what we do with set_fs().  And we are talking about
30099 -	 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
30100 -	 * apply.  Any suggestions on fixing that code?		-- AV
30101 -	 */
30102  	iov[0].iov_base = (void *)&ah;
30103  	iov[0].iov_len = size;
30104 -	for (i = 0; i < msg->msg_iovlen; i++) {
30105 -		void __user *base = msg->msg_iov[i].iov_base;
30106 -		size_t iov_len = msg->msg_iov[i].iov_len;
30107 -		/* Check it now since we switch to KERNEL_DS later. */
30108 -		if (!access_ok(VERIFY_READ, base, iov_len)) {
30109 -			mutex_unlock(&econet_mutex);
30110 -			return -EFAULT;
30111 -		}
30112 -		iov[i+1].iov_base = base;
30113 -		iov[i+1].iov_len = iov_len;
30114 -		size += iov_len;
30115 +
30116 +	userbuf = vmalloc(len);
30117 +	if (userbuf == NULL) {
30118 +		err = -ENOMEM;
30119 +		goto error;
30120  	}
30121  
30122 +	iov[1].iov_base = userbuf;
30123 +	iov[1].iov_len = len;
30124 +	err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
30125 +	if (err)
30126 +		goto error_free_buf;
30127 +
30128  	/* Get a skbuff (no data, just holds our cb information) */
30129  	if ((skb = sock_alloc_send_skb(sk, 0,
30130  				       msg->msg_flags & MSG_DONTWAIT,
30131 -				       &err)) == NULL) {
30132 -		mutex_unlock(&econet_mutex);
30133 -		return err;
30134 -	}
30135 +				       &err)) == NULL)
30136 +		goto error_free_buf;
30137  
30138  	eb = (struct ec_cb *)&skb->cb;
30139  
30140 @@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30141  	udpmsg.msg_name = (void *)&udpdest;
30142  	udpmsg.msg_namelen = sizeof(udpdest);
30143  	udpmsg.msg_iov = &iov[0];
30144 -	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
30145 +	udpmsg.msg_iovlen = 2;
30146  	udpmsg.msg_control = NULL;
30147  	udpmsg.msg_controllen = 0;
30148  	udpmsg.msg_flags=0;
30149 @@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
30150  	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
30151  	err = sock_sendmsg(udpsock, &udpmsg, size);
30152  	set_fs(oldfs);
30153 +
30154 +error_free_buf:
30155 +	vfree(userbuf);
30156  #else
30157  	err = -EPROTOTYPE;
30158  #endif
30159 +	error:
30160  	mutex_unlock(&econet_mutex);
30161  
30162  	return err;
30163 @@ -671,6 +661,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
30164  	err = 0;
30165  	switch (cmd) {
30166  	case SIOCSIFADDR:
30167 +		if (!capable(CAP_NET_ADMIN)) {
30168 +			err = -EPERM;
30169 +			break;
30170 +		}
30171 +
30172  		edev = dev->ec_ptr;
30173  		if (edev == NULL) {
30174  			/* Magic up a new one. */
30175 @@ -856,9 +851,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
30176  {
30177  	struct iphdr *ip = ip_hdr(skb);
30178  	unsigned char stn = ntohl(ip->saddr) & 0xff;
30179 +	struct dst_entry *dst = skb_dst(skb);
30180 +	struct ec_device *edev = NULL;
30181  	struct sock *sk = NULL;
30182  	struct sk_buff *newskb;
30183 -	struct ec_device *edev = skb->dev->ec_ptr;
30184 +
30185 +	if (dst)
30186 +		edev = dst->dev->ec_ptr;
30187  
30188  	if (! edev)
30189  		goto bad;
30190 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
30191 index f115ea6..6adb1ab 100644
30192 --- a/net/ipv4/tcp.c
30193 +++ b/net/ipv4/tcp.c
30194 @@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
30195  		/* Values greater than interface MTU won't take effect. However
30196  		 * at the point when this call is done we typically don't yet
30197  		 * know which interface is going to be used */
30198 -		if (val < 8 || val > MAX_TCP_WINDOW) {
30199 +		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
30200  			err = -EINVAL;
30201  			break;
30202  		}
30203 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
30204 index 0207662..cb8d305 100644
30205 --- a/net/ipv4/tcp_ipv4.c
30206 +++ b/net/ipv4/tcp_ipv4.c
30207 @@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
30208  		    !icsk->icsk_backoff)
30209  			break;
30210  
30211 +		if (sock_owned_by_user(sk))
30212 +			break;
30213 +
30214  		icsk->icsk_backoff--;
30215  		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
30216  					 icsk->icsk_backoff;
30217 @@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
30218  		if (remaining) {
30219  			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30220  						  remaining, TCP_RTO_MAX);
30221 -		} else if (sock_owned_by_user(sk)) {
30222 -			/* RTO revert clocked out retransmission,
30223 -			 * but socket is locked. Will defer. */
30224 -			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30225 -						  HZ/20, TCP_RTO_MAX);
30226  		} else {
30227  			/* RTO revert clocked out retransmission.
30228  			 * Will retransmit now */
30229 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
30230 index de3bd84..7abecf7 100644
30231 --- a/net/ipv4/tcp_output.c
30232 +++ b/net/ipv4/tcp_output.c
30233 @@ -237,11 +237,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
30234  		/* when initializing use the value from init_rcv_wnd
30235  		 * rather than the default from above
30236  		 */
30237 -		if (init_rcv_wnd &&
30238 -		    (*rcv_wnd > init_rcv_wnd * mss))
30239 -			*rcv_wnd = init_rcv_wnd * mss;
30240 -		else if (*rcv_wnd > init_cwnd * mss)
30241 -			*rcv_wnd = init_cwnd * mss;
30242 +		if (init_rcv_wnd)
30243 +			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
30244 +		else
30245 +			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
30246  	}
30247  
30248  	/* Set the clamp no higher than max representable value */
30249 @@ -392,27 +391,30 @@ struct tcp_out_options {
30250   */
30251  static u8 tcp_cookie_size_check(u8 desired)
30252  {
30253 -	if (desired > 0) {
30254 +	int cookie_size;
30255 +
30256 +	if (desired > 0)
30257  		/* previously specified */
30258  		return desired;
30259 -	}
30260 -	if (sysctl_tcp_cookie_size <= 0) {
30261 +
30262 +	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
30263 +	if (cookie_size <= 0)
30264  		/* no default specified */
30265  		return 0;
30266 -	}
30267 -	if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
30268 +
30269 +	if (cookie_size <= TCP_COOKIE_MIN)
30270  		/* value too small, specify minimum */
30271  		return TCP_COOKIE_MIN;
30272 -	}
30273 -	if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
30274 +
30275 +	if (cookie_size >= TCP_COOKIE_MAX)
30276  		/* value too large, specify maximum */
30277  		return TCP_COOKIE_MAX;
30278 -	}
30279 -	if (0x1 & sysctl_tcp_cookie_size) {
30280 +
30281 +	if (cookie_size & 1)
30282  		/* 8-bit multiple, illegal, fix it */
30283 -		return (u8)(sysctl_tcp_cookie_size + 0x1);
30284 -	}
30285 -	return (u8)sysctl_tcp_cookie_size;
30286 +		cookie_size++;
30287 +
30288 +	return (u8)cookie_size;
30289  }
30290  
30291  /* Write previously computed TCP options to the packet.
30292 @@ -1519,6 +1521,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
30293  	struct tcp_sock *tp = tcp_sk(sk);
30294  	const struct inet_connection_sock *icsk = inet_csk(sk);
30295  	u32 send_win, cong_win, limit, in_flight;
30296 +	int win_divisor;
30297  
30298  	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
30299  		goto send_now;
30300 @@ -1550,13 +1553,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
30301  	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
30302  		goto send_now;
30303  
30304 -	if (sysctl_tcp_tso_win_divisor) {
30305 +	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
30306 +	if (win_divisor) {
30307  		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
30308  
30309  		/* If at least some fraction of a window is available,
30310  		 * just use it.
30311  		 */
30312 -		chunk /= sysctl_tcp_tso_win_divisor;
30313 +		chunk /= win_divisor;
30314  		if (limit >= chunk)
30315  			goto send_now;
30316  	} else {
30317 diff --git a/net/irda/iriap.c b/net/irda/iriap.c
30318 index fce364c..5b743bd 100644
30319 --- a/net/irda/iriap.c
30320 +++ b/net/irda/iriap.c
30321 @@ -502,7 +502,8 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
30322  		IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
30323  
30324  		/* Make sure the string is null-terminated */
30325 -		fp[n+value_len] = 0x00;
30326 +		if (n + value_len < skb->len)
30327 +			fp[n + value_len] = 0x00;
30328  		IRDA_DEBUG(4, "Got string %s\n", fp+n);
30329  
30330  		/* Will truncate to IAS_MAX_STRING bytes */
30331 diff --git a/net/irda/parameters.c b/net/irda/parameters.c
30332 index fc1a205..71cd38c 100644
30333 --- a/net/irda/parameters.c
30334 +++ b/net/irda/parameters.c
30335 @@ -298,6 +298,8 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
30336  
30337  	p.pi = pi;     /* In case handler needs to know */
30338  	p.pl = buf[1]; /* Extract length of value */
30339 +	if (p.pl > 32)
30340 +		p.pl = 32;
30341  
30342  	IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
30343  		   p.pi, p.pl);
30344 @@ -318,7 +320,7 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi,
30345  		   (__u8) str[0], (__u8) str[1]);
30346  
30347  	/* Null terminate string */
30348 -	str[p.pl+1] = '\0';
30349 +	str[p.pl] = '\0';
30350  
30351  	p.pv.c = str; /* Handler will need to take a copy */
30352  
30353 diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
30354 index 226a0ae..a2cec79 100644
30355 --- a/net/l2tp/l2tp_ip.c
30356 +++ b/net/l2tp/l2tp_ip.c
30357 @@ -676,4 +676,8 @@ MODULE_LICENSE("GPL");
30358  MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
30359  MODULE_DESCRIPTION("L2TP over IP");
30360  MODULE_VERSION("1.0");
30361 -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
30362 +
30363 +/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
30364 + * enums
30365 + */
30366 +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
30367 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
30368 index 5826129..e35dbe5 100644
30369 --- a/net/llc/af_llc.c
30370 +++ b/net/llc/af_llc.c
30371 @@ -317,8 +317,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
30372  		goto out;
30373  	rc = -ENODEV;
30374  	rtnl_lock();
30375 +	rcu_read_lock();
30376  	if (sk->sk_bound_dev_if) {
30377 -		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
30378 +		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
30379  		if (llc->dev) {
30380  			if (!addr->sllc_arphrd)
30381  				addr->sllc_arphrd = llc->dev->type;
30382 @@ -329,13 +330,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
30383  			    !llc_mac_match(addr->sllc_mac,
30384  					   llc->dev->dev_addr)) {
30385  				rc = -EINVAL;
30386 -				dev_put(llc->dev);
30387  				llc->dev = NULL;
30388  			}
30389  		}
30390  	} else
30391  		llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
30392  					   addr->sllc_mac);
30393 +	rcu_read_unlock();
30394  	rtnl_unlock();
30395  	if (!llc->dev)
30396  		goto out;
30397 diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
30398 index 965b272..2f6903e 100644
30399 --- a/net/mac80211/agg-rx.c
30400 +++ b/net/mac80211/agg-rx.c
30401 @@ -172,8 +172,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
30402  				     struct ieee80211_mgmt *mgmt,
30403  				     size_t len)
30404  {
30405 -	struct ieee80211_hw *hw = &local->hw;
30406 -	struct ieee80211_conf *conf = &hw->conf;
30407  	struct tid_ampdu_rx *tid_agg_rx;
30408  	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
30409  	u8 dialog_token;
30410 @@ -218,13 +216,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
30411  		goto end_no_lock;
30412  	}
30413  	/* determine default buffer size */
30414 -	if (buf_size == 0) {
30415 -		struct ieee80211_supported_band *sband;
30416 -
30417 -		sband = local->hw.wiphy->bands[conf->channel->band];
30418 -		buf_size = IEEE80211_MIN_AMPDU_BUF;
30419 -		buf_size = buf_size << sband->ht_cap.ampdu_factor;
30420 -	}
30421 +	if (buf_size == 0)
30422 +		buf_size = IEEE80211_MAX_AMPDU_BUF;
30423  
30424  
30425  	/* examine state machine */
30426 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
30427 index 29ac8e1..2095602 100644
30428 --- a/net/mac80211/cfg.c
30429 +++ b/net/mac80211/cfg.c
30430 @@ -634,6 +634,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
30431  				 struct sta_info *sta,
30432  				 struct station_parameters *params)
30433  {
30434 +	unsigned long flags;
30435  	u32 rates;
30436  	int i, j;
30437  	struct ieee80211_supported_band *sband;
30438 @@ -642,7 +643,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
30439  
30440  	sband = local->hw.wiphy->bands[local->oper_channel->band];
30441  
30442 -	spin_lock_bh(&sta->lock);
30443 +	spin_lock_irqsave(&sta->flaglock, flags);
30444  	mask = params->sta_flags_mask;
30445  	set = params->sta_flags_set;
30446  
30447 @@ -669,7 +670,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
30448  		if (set & BIT(NL80211_STA_FLAG_MFP))
30449  			sta->flags |= WLAN_STA_MFP;
30450  	}
30451 -	spin_unlock_bh(&sta->lock);
30452 +	spin_unlock_irqrestore(&sta->flaglock, flags);
30453  
30454  	/*
30455  	 * cfg80211 validates this (1-2007) and allows setting the AID
30456 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
30457 index c691780..45c99f0 100644
30458 --- a/net/mac80211/ibss.c
30459 +++ b/net/mac80211/ibss.c
30460 @@ -435,6 +435,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
30461  	if (!sta)
30462  		return NULL;
30463  
30464 +	sta->last_rx = jiffies;
30465  	set_sta_flags(sta, WLAN_STA_AUTHORIZED);
30466  
30467  	/* make sure mandatory rates are always added */
30468 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
30469 index 65e0ed6..3546054 100644
30470 --- a/net/mac80211/ieee80211_i.h
30471 +++ b/net/mac80211/ieee80211_i.h
30472 @@ -1003,6 +1003,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
30473  void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
30474  void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
30475  				  struct sk_buff *skb);
30476 +void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
30477 +void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
30478  
30479  /* IBSS code */
30480  void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
30481 diff --git a/net/mac80211/key.c b/net/mac80211/key.c
30482 index 1b9d87e..3f76484 100644
30483 --- a/net/mac80211/key.c
30484 +++ b/net/mac80211/key.c
30485 @@ -323,6 +323,12 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
30486  	if (!key)
30487  		return;
30488  
30489 +	/*
30490 +	 * Synchronize so the TX path can no longer be using
30491 +	 * this key before we free/remove it.
30492 +	 */
30493 +	synchronize_rcu();
30494 +
30495  	if (key->local)
30496  		ieee80211_key_disable_hw_accel(key);
30497  
30498 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
30499 index ded5c38..e8acdb2 100644
30500 --- a/net/mac80211/main.c
30501 +++ b/net/mac80211/main.c
30502 @@ -108,7 +108,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
30503  		chan = scan_chan;
30504  		channel_type = NL80211_CHAN_NO_HT;
30505  		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
30506 -	} else if (local->tmp_channel) {
30507 +	} else if (local->tmp_channel &&
30508 +		   local->oper_channel != local->tmp_channel) {
30509  		chan = scan_chan = local->tmp_channel;
30510  		channel_type = local->tmp_channel_type;
30511  		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
30512 diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
30513 index ea13a80..1c91f0f 100644
30514 --- a/net/mac80211/mesh_plink.c
30515 +++ b/net/mac80211/mesh_plink.c
30516 @@ -412,7 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
30517  	enum plink_event event;
30518  	enum plink_frame_type ftype;
30519  	size_t baselen;
30520 -	bool deactivated;
30521 +	bool deactivated, matches_local = true;
30522  	u8 ie_len;
30523  	u8 *baseaddr;
30524  	__le16 plid, llid, reason;
30525 @@ -487,6 +487,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
30526  	/* Now we will figure out the appropriate event... */
30527  	event = PLINK_UNDEFINED;
30528  	if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
30529 +		matches_local = false;
30530  		switch (ftype) {
30531  		case PLINK_OPEN:
30532  			event = OPN_RJCT;
30533 @@ -498,7 +499,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
30534  			/* avoid warning */
30535  			break;
30536  		}
30537 -		spin_lock_bh(&sta->lock);
30538 +	}
30539 +
30540 +	if (!sta && !matches_local) {
30541 +		rcu_read_unlock();
30542 +		reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
30543 +		llid = 0;
30544 +		mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid,
30545 +				    plid, reason);
30546 +		return;
30547  	} else if (!sta) {
30548  		/* ftype == PLINK_OPEN */
30549  		u32 rates;
30550 @@ -522,7 +531,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
30551  		}
30552  		event = OPN_ACPT;
30553  		spin_lock_bh(&sta->lock);
30554 -	} else {
30555 +	} else if (matches_local) {
30556  		spin_lock_bh(&sta->lock);
30557  		switch (ftype) {
30558  		case PLINK_OPEN:
30559 @@ -564,6 +573,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
30560  			rcu_read_unlock();
30561  			return;
30562  		}
30563 +	} else {
30564 +		spin_lock_bh(&sta->lock);
30565  	}
30566  
30567  	mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
30568 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
30569 index b6c163a..4c5eed9 100644
30570 --- a/net/mac80211/mlme.c
30571 +++ b/net/mac80211/mlme.c
30572 @@ -109,7 +109,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd,
30573  		mod_timer(&ifmgd->timer, timeout);
30574  }
30575  
30576 -static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
30577 +void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
30578  {
30579  	if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER)
30580  		return;
30581 @@ -118,6 +118,19 @@ static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
30582  		  round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME));
30583  }
30584  
30585 +void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
30586 +{
30587 +	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
30588 +
30589 +	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
30590 +		return;
30591 +
30592 +	mod_timer(&sdata->u.mgd.conn_mon_timer,
30593 +		  round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
30594 +
30595 +	ifmgd->probe_send_count = 0;
30596 +}
30597 +
30598  static int ecw2cw(int ecw)
30599  {
30600  	return (1 << ecw) - 1;
30601 @@ -1006,21 +1019,26 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
30602  	if (is_multicast_ether_addr(hdr->addr1))
30603  		return;
30604  
30605 -	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
30606 -		return;
30607 -
30608 -	mod_timer(&sdata->u.mgd.conn_mon_timer,
30609 -		  round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
30610 +	ieee80211_sta_reset_conn_monitor(sdata);
30611  }
30612  
30613  static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
30614  {
30615  	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
30616  	const u8 *ssid;
30617 +	u8 *dst = ifmgd->associated->bssid;
30618 +	u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
30619 +
30620 +	/*
30621 +	 * Try sending broadcast probe requests for the last three
30622 +	 * probe requests after the first ones failed since some
30623 +	 * buggy APs only support broadcast probe requests.
30624 +	 */
30625 +	if (ifmgd->probe_send_count >= unicast_limit)
30626 +		dst = NULL;
30627  
30628  	ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
30629 -	ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
30630 -				 ssid + 2, ssid[1], NULL, 0);
30631 +	ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
30632  
30633  	ifmgd->probe_send_count++;
30634  	ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
30635 @@ -1262,7 +1280,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
30636  
30637  	rates = 0;
30638  	basic_rates = 0;
30639 -	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
30640 +	sband = local->hw.wiphy->bands[wk->chan->band];
30641  
30642  	for (i = 0; i < elems.supp_rates_len; i++) {
30643  		int rate = (elems.supp_rates[i] & 0x7f) * 5;
30644 @@ -1298,11 +1316,11 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
30645  		}
30646  	}
30647  
30648 -	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
30649 +	sta->sta.supp_rates[wk->chan->band] = rates;
30650  	sdata->vif.bss_conf.basic_rates = basic_rates;
30651  
30652  	/* cf. IEEE 802.11 9.2.12 */
30653 -	if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
30654 +	if (wk->chan->band == IEEE80211_BAND_2GHZ &&
30655  	    have_higher_than_11mbit)
30656  		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
30657  	else
30658 @@ -1362,7 +1380,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
30659  	 * Also start the timer that will detect beacon loss.
30660  	 */
30661  	ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
30662 -	mod_beacon_timer(sdata);
30663 +	ieee80211_sta_reset_beacon_monitor(sdata);
30664  
30665  	return true;
30666  }
30667 @@ -1465,7 +1483,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
30668  		 * we have or will be receiving any beacons or data, so let's
30669  		 * schedule the timers again, just in case.
30670  		 */
30671 -		mod_beacon_timer(sdata);
30672 +		ieee80211_sta_reset_beacon_monitor(sdata);
30673  
30674  		mod_timer(&ifmgd->conn_mon_timer,
30675  			  round_jiffies_up(jiffies +
30676 @@ -1540,7 +1558,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
30677  	ifmgd->last_beacon_signal = rx_status->signal;
30678  	if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
30679  		ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
30680 -		ifmgd->ave_beacon_signal = rx_status->signal;
30681 +		ifmgd->ave_beacon_signal = rx_status->signal * 16;
30682  		ifmgd->last_cqm_event_signal = 0;
30683  	} else {
30684  		ifmgd->ave_beacon_signal =
30685 @@ -1588,7 +1606,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
30686  	 * Push the beacon loss detection into the future since
30687  	 * we are processing a beacon from the AP just now.
30688  	 */
30689 -	mod_beacon_timer(sdata);
30690 +	ieee80211_sta_reset_beacon_monitor(sdata);
30691  
30692  	ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
30693  	ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
30694 diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
30695 index c36b191..cf5ee30 100644
30696 --- a/net/mac80211/offchannel.c
30697 +++ b/net/mac80211/offchannel.c
30698 @@ -22,12 +22,16 @@
30699  static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
30700  {
30701  	struct ieee80211_local *local = sdata->local;
30702 +	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
30703  
30704  	local->offchannel_ps_enabled = false;
30705  
30706  	/* FIXME: what to do when local->pspolling is true? */
30707  
30708  	del_timer_sync(&local->dynamic_ps_timer);
30709 +	del_timer_sync(&ifmgd->bcn_mon_timer);
30710 +	del_timer_sync(&ifmgd->conn_mon_timer);
30711 +
30712  	cancel_work_sync(&local->dynamic_ps_enable_work);
30713  
30714  	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
30715 @@ -85,6 +89,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
30716  		mod_timer(&local->dynamic_ps_timer, jiffies +
30717  			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
30718  	}
30719 +
30720 +	ieee80211_sta_reset_beacon_monitor(sdata);
30721 +	ieee80211_sta_reset_conn_monitor(sdata);
30722  }
30723  
30724  void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
30725 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
30726 index be04d461..82d5750 100644
30727 --- a/net/mac80211/rate.c
30728 +++ b/net/mac80211/rate.c
30729 @@ -328,6 +328,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
30730  		 * if needed.
30731  		 */
30732  		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
30733 +			/* Skip invalid rates */
30734 +			if (info->control.rates[i].idx < 0)
30735 +				break;
30736  			/* Rate masking supports only legacy rates for now */
30737  			if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
30738  				continue;
30739 diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
30740 index c5b4659..2a18d66 100644
30741 --- a/net/mac80211/rc80211_minstrel_ht.c
30742 +++ b/net/mac80211/rc80211_minstrel_ht.c
30743 @@ -397,8 +397,9 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
30744  	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
30745  		return;
30746  
30747 -	if (!info->status.ampdu_len) {
30748 -		info->status.ampdu_ack_len = 1;
30749 +	if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
30750 +		info->status.ampdu_ack_len =
30751 +			(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
30752  		info->status.ampdu_len = 1;
30753  	}
30754  
30755 @@ -426,7 +427,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
30756  		group = minstrel_ht_get_group_idx(&ar[i]);
30757  		rate = &mi->groups[group].rates[ar[i].idx % 8];
30758  
30759 -		if (last && (info->flags & IEEE80211_TX_STAT_ACK))
30760 +		if (last)
30761  			rate->success += info->status.ampdu_ack_len;
30762  
30763  		rate->attempts += ar[i].count * info->status.ampdu_len;
30764 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
30765 index 28624282..2bec9b9 100644
30766 --- a/net/mac80211/rx.c
30767 +++ b/net/mac80211/rx.c
30768 @@ -1715,6 +1715,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
30769  			if (!fwd_skb && net_ratelimit())
30770  				printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
30771  						   sdata->name);
30772 +			if (!fwd_skb)
30773 +				goto out;
30774  
30775  			fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
30776  			memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
30777 @@ -1752,6 +1754,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
30778  		}
30779  	}
30780  
30781 + out:
30782  	if (is_multicast_ether_addr(hdr->addr1) ||
30783  	    sdata->dev->flags & IFF_PROMISC)
30784  		return RX_CONTINUE;
30785 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
30786 index 34da679..6ffa26a 100644
30787 --- a/net/mac80211/status.c
30788 +++ b/net/mac80211/status.c
30789 @@ -58,6 +58,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
30790  	info->control.vif = &sta->sdata->vif;
30791  	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
30792  		       IEEE80211_TX_INTFL_RETRANSMISSION;
30793 +	info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
30794  
30795  	sta->tx_filtered_count++;
30796  
30797 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
30798 index c54db96..9d5af5d 100644
30799 --- a/net/mac80211/tx.c
30800 +++ b/net/mac80211/tx.c
30801 @@ -1694,7 +1694,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
30802  {
30803  	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
30804  	struct ieee80211_local *local = sdata->local;
30805 -	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
30806 +	struct ieee80211_tx_info *info;
30807  	int ret = NETDEV_TX_BUSY, head_need;
30808  	u16 ethertype, hdrlen,  meshhdrlen = 0;
30809  	__le16 fc;
30810 @@ -1705,15 +1705,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
30811  	int nh_pos, h_pos;
30812  	struct sta_info *sta = NULL;
30813  	u32 sta_flags = 0;
30814 +	struct sk_buff *tmp_skb;
30815  
30816  	if (unlikely(skb->len < ETH_HLEN)) {
30817  		ret = NETDEV_TX_OK;
30818  		goto fail;
30819  	}
30820  
30821 -	nh_pos = skb_network_header(skb) - skb->data;
30822 -	h_pos = skb_transport_header(skb) - skb->data;
30823 -
30824  	/* convert Ethernet header to proper 802.11 header (based on
30825  	 * operation mode) */
30826  	ethertype = (skb->data[12] << 8) | skb->data[13];
30827 @@ -1885,6 +1883,20 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
30828  		goto fail;
30829  	}
30830  
30831 +	/*
30832 +	 * If the skb is shared we need to obtain our own copy.
30833 +	 */
30834 +	if (skb_shared(skb)) {
30835 +		tmp_skb = skb;
30836 +		skb = skb_copy(skb, GFP_ATOMIC);
30837 +		kfree_skb(tmp_skb);
30838 +
30839 +		if (!skb) {
30840 +			ret = NETDEV_TX_OK;
30841 +			goto fail;
30842 +		}
30843 +	}
30844 +
30845  	hdr.frame_control = fc;
30846  	hdr.duration_id = 0;
30847  	hdr.seq_ctrl = 0;
30848 @@ -1903,6 +1915,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
30849  		encaps_len = 0;
30850  	}
30851  
30852 +	nh_pos = skb_network_header(skb) - skb->data;
30853 +	h_pos = skb_transport_header(skb) - skb->data;
30854 +
30855  	skb_pull(skb, skip_header_bytes);
30856  	nh_pos -= skip_header_bytes;
30857  	h_pos -= skip_header_bytes;
30858 @@ -1969,6 +1984,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
30859  	skb_set_network_header(skb, nh_pos);
30860  	skb_set_transport_header(skb, h_pos);
30861  
30862 +	info = IEEE80211_SKB_CB(skb);
30863  	memset(info, 0, sizeof(*info));
30864  
30865  	dev->trans_start = jiffies;
30866 @@ -2160,6 +2176,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
30867  
30868  	sdata = vif_to_sdata(vif);
30869  
30870 +	if (!ieee80211_sdata_running(sdata))
30871 +		goto out;
30872 +
30873  	if (tim_offset)
30874  		*tim_offset = 0;
30875  	if (tim_length)
30876 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
30877 index df3eedb..a37a6b1 100644
30878 --- a/net/netfilter/nf_conntrack_core.c
30879 +++ b/net/netfilter/nf_conntrack_core.c
30880 @@ -1260,7 +1260,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
30881  	if (!hash) {
30882  		*vmalloced = 1;
30883  		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
30884 -		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
30885 +		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
30886 +				 PAGE_KERNEL);
30887  	}
30888  
30889  	if (hash && nulls)
30890 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
30891 index 23b2d6c..364ad16 100644
30892 --- a/net/netfilter/xt_SECMARK.c
30893 +++ b/net/netfilter/xt_SECMARK.c
30894 @@ -101,7 +101,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
30895  	switch (info->mode) {
30896  	case SECMARK_MODE_SEL:
30897  		err = checkentry_selinux(info);
30898 -		if (err <= 0)
30899 +		if (err)
30900  			return err;
30901  		break;
30902  
30903 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
30904 index 9a17f28..9ba7014 100644
30905 --- a/net/packet/af_packet.c
30906 +++ b/net/packet/af_packet.c
30907 @@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
30908  
30909  		err = -EINVAL;
30910  		vnet_hdr_len = sizeof(vnet_hdr);
30911 -		if ((len -= vnet_hdr_len) < 0)
30912 +		if (len < vnet_hdr_len)
30913  			goto out_free;
30914  
30915 +		len -= vnet_hdr_len;
30916 +
30917  		if (skb_is_gso(skb)) {
30918  			struct skb_shared_info *sinfo = skb_shinfo(skb);
30919  
30920 @@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
30921  	rcu_read_lock();
30922  	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
30923  	if (dev)
30924 -		strlcpy(uaddr->sa_data, dev->name, 15);
30925 +		strncpy(uaddr->sa_data, dev->name, 14);
30926  	else
30927  		memset(uaddr->sa_data, 0, 14);
30928  	rcu_read_unlock();
30929 @@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
30930  	sll->sll_family = AF_PACKET;
30931  	sll->sll_ifindex = po->ifindex;
30932  	sll->sll_protocol = po->num;
30933 +	sll->sll_pkttype = 0;
30934  	rcu_read_lock();
30935  	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
30936  	if (dev) {
30937 diff --git a/net/rds/rdma.c b/net/rds/rdma.c
30938 index 75fd13b..3998967 100644
30939 --- a/net/rds/rdma.c
30940 +++ b/net/rds/rdma.c
30941 @@ -474,7 +474,7 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
30942  		goto out;
30943  	}
30944  
30945 -	if (args->nr_local > (u64)UINT_MAX) {
30946 +	if (args->nr_local > UIO_MAXIOV) {
30947  		ret = -EMSGSIZE;
30948  		goto out;
30949  	}
30950 diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
30951 index 78ef2c5..08be223 100644
30952 --- a/net/sched/cls_cgroup.c
30953 +++ b/net/sched/cls_cgroup.c
30954 @@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = {
30955  	.populate	= cgrp_populate,
30956  #ifdef CONFIG_NET_CLS_CGROUP
30957  	.subsys_id	= net_cls_subsys_id,
30958 -#else
30959 -#define net_cls_subsys_id net_cls_subsys.subsys_id
30960  #endif
30961  	.module		= THIS_MODULE,
30962  };
30963 diff --git a/net/socket.c b/net/socket.c
30964 index 2270b94..58dfc91 100644
30965 --- a/net/socket.c
30966 +++ b/net/socket.c
30967 @@ -1651,6 +1651,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
30968  	struct iovec iov;
30969  	int fput_needed;
30970  
30971 +	if (len > INT_MAX)
30972 +		len = INT_MAX;
30973  	sock = sockfd_lookup_light(fd, &err, &fput_needed);
30974  	if (!sock)
30975  		goto out;
30976 @@ -1708,6 +1710,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
30977  	int err, err2;
30978  	int fput_needed;
30979  
30980 +	if (size > INT_MAX)
30981 +		size = INT_MAX;
30982  	sock = sockfd_lookup_light(fd, &err, &fput_needed);
30983  	if (!sock)
30984  		goto out;
30985 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
30986 index fa55490..cbc5b8c 100644
30987 --- a/net/sunrpc/clnt.c
30988 +++ b/net/sunrpc/clnt.c
30989 @@ -1675,7 +1675,7 @@ rpc_verify_header(struct rpc_task *task)
30990  			rpcauth_invalcred(task);
30991  			/* Ensure we obtain a new XID! */
30992  			xprt_release(task);
30993 -			task->tk_action = call_refresh;
30994 +			task->tk_action = call_reserve;
30995  			goto out_retry;
30996  		case RPC_AUTH_BADCRED:
30997  		case RPC_AUTH_BADVERF:
30998 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
30999 index cbc0849..2f5fb71 100644
31000 --- a/net/sunrpc/svc_xprt.c
31001 +++ b/net/sunrpc/svc_xprt.c
31002 @@ -212,6 +212,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
31003  	spin_lock(&svc_xprt_class_lock);
31004  	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
31005  		struct svc_xprt *newxprt;
31006 +		unsigned short newport;
31007  
31008  		if (strcmp(xprt_name, xcl->xcl_name))
31009  			continue;
31010 @@ -230,8 +231,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
31011  		spin_lock_bh(&serv->sv_lock);
31012  		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
31013  		spin_unlock_bh(&serv->sv_lock);
31014 +		newport = svc_xprt_local_port(newxprt);
31015  		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
31016 -		return svc_xprt_local_port(newxprt);
31017 +		return newport;
31018  	}
31019   err:
31020  	spin_unlock(&svc_xprt_class_lock);
31021 @@ -431,8 +433,13 @@ void svc_xprt_received(struct svc_xprt *xprt)
31022  {
31023  	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
31024  	xprt->xpt_pool = NULL;
31025 +	/* As soon as we clear busy, the xprt could be closed and
31026 +	 * 'put', so we need a reference to call svc_xprt_enqueue with:
31027 +	 */
31028 +	svc_xprt_get(xprt);
31029  	clear_bit(XPT_BUSY, &xprt->xpt_flags);
31030  	svc_xprt_enqueue(xprt);
31031 +	svc_xprt_put(xprt);
31032  }
31033  EXPORT_SYMBOL_GPL(svc_xprt_received);
31034  
31035 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
31036 index 0b39b24..b4cfe20 100644
31037 --- a/net/unix/af_unix.c
31038 +++ b/net/unix/af_unix.c
31039 @@ -1343,9 +1343,25 @@ static void unix_destruct_scm(struct sk_buff *skb)
31040  	sock_wfree(skb);
31041  }
31042  
31043 +#define MAX_RECURSION_LEVEL 4
31044 +
31045  static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
31046  {
31047  	int i;
31048 +	unsigned char max_level = 0;
31049 +	int unix_sock_count = 0;
31050 +
31051 +	for (i = scm->fp->count - 1; i >= 0; i--) {
31052 +		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
31053 +
31054 +		if (sk) {
31055 +			unix_sock_count++;
31056 +			max_level = max(max_level,
31057 +					unix_sk(sk)->recursion_level);
31058 +		}
31059 +	}
31060 +	if (unlikely(max_level > MAX_RECURSION_LEVEL))
31061 +		return -ETOOMANYREFS;
31062  
31063  	/*
31064  	 * Need to duplicate file references for the sake of garbage
31065 @@ -1356,9 +1372,11 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
31066  	if (!UNIXCB(skb).fp)
31067  		return -ENOMEM;
31068  
31069 -	for (i = scm->fp->count-1; i >= 0; i--)
31070 -		unix_inflight(scm->fp->fp[i]);
31071 -	return 0;
31072 +	if (unix_sock_count) {
31073 +		for (i = scm->fp->count - 1; i >= 0; i--)
31074 +			unix_inflight(scm->fp->fp[i]);
31075 +	}
31076 +	return max_level;
31077  }
31078  
31079  static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
31080 @@ -1393,6 +1411,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
31081  	struct sk_buff *skb;
31082  	long timeo;
31083  	struct scm_cookie tmp_scm;
31084 +	int max_level;
31085  
31086  	if (NULL == siocb->scm)
31087  		siocb->scm = &tmp_scm;
31088 @@ -1431,8 +1450,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
31089  		goto out;
31090  
31091  	err = unix_scm_to_skb(siocb->scm, skb, true);
31092 -	if (err)
31093 +	if (err < 0)
31094  		goto out_free;
31095 +	max_level = err + 1;
31096  	unix_get_secdata(siocb->scm, skb);
31097  
31098  	skb_reset_transport_header(skb);
31099 @@ -1512,6 +1532,8 @@ restart:
31100  	}
31101  
31102  	skb_queue_tail(&other->sk_receive_queue, skb);
31103 +	if (max_level > unix_sk(other)->recursion_level)
31104 +		unix_sk(other)->recursion_level = max_level;
31105  	unix_state_unlock(other);
31106  	other->sk_data_ready(other, len);
31107  	sock_put(other);
31108 @@ -1542,6 +1564,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
31109  	int sent = 0;
31110  	struct scm_cookie tmp_scm;
31111  	bool fds_sent = false;
31112 +	int max_level;
31113  
31114  	if (NULL == siocb->scm)
31115  		siocb->scm = &tmp_scm;
31116 @@ -1605,10 +1628,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
31117  
31118  		/* Only send the fds in the first buffer */
31119  		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
31120 -		if (err) {
31121 +		if (err < 0) {
31122  			kfree_skb(skb);
31123  			goto out_err;
31124  		}
31125 +		max_level = err + 1;
31126  		fds_sent = true;
31127  
31128  		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
31129 @@ -1624,6 +1648,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
31130  			goto pipe_err_free;
31131  
31132  		skb_queue_tail(&other->sk_receive_queue, skb);
31133 +		if (max_level > unix_sk(other)->recursion_level)
31134 +			unix_sk(other)->recursion_level = max_level;
31135  		unix_state_unlock(other);
31136  		other->sk_data_ready(other, size);
31137  		sent += size;
31138 @@ -1840,6 +1866,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
31139  		unix_state_lock(sk);
31140  		skb = skb_dequeue(&sk->sk_receive_queue);
31141  		if (skb == NULL) {
31142 +			unix_sk(sk)->recursion_level = 0;
31143  			if (copied >= target)
31144  				goto unlock;
31145  
31146 diff --git a/net/unix/garbage.c b/net/unix/garbage.c
31147 index c8df6fd..f89f83b 100644
31148 --- a/net/unix/garbage.c
31149 +++ b/net/unix/garbage.c
31150 @@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
31151  unsigned int unix_tot_inflight;
31152  
31153  
31154 -static struct sock *unix_get_socket(struct file *filp)
31155 +struct sock *unix_get_socket(struct file *filp)
31156  {
31157  	struct sock *u_sock = NULL;
31158  	struct inode *inode = filp->f_path.dentry->d_inode;
31159 @@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
31160  }
31161  
31162  static bool gc_in_progress = false;
31163 +#define UNIX_INFLIGHT_TRIGGER_GC 16000
31164  
31165  void wait_for_unix_gc(void)
31166  {
31167 +	/*
31168 +	 * If number of inflight sockets is insane,
31169 +	 * force a garbage collect right now.
31170 +	 */
31171 +	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
31172 +		unix_gc();
31173  	wait_event(unix_gc_wait, gc_in_progress == false);
31174  }
31175  
31176 diff --git a/net/wireless/chan.c b/net/wireless/chan.c
31177 index d0c92dd..d8f443b 100644
31178 --- a/net/wireless/chan.c
31179 +++ b/net/wireless/chan.c
31180 @@ -44,6 +44,36 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
31181  	return chan;
31182  }
31183  
31184 +static bool can_beacon_sec_chan(struct wiphy *wiphy,
31185 +				struct ieee80211_channel *chan,
31186 +				enum nl80211_channel_type channel_type)
31187 +{
31188 +	struct ieee80211_channel *sec_chan;
31189 +	int diff;
31190 +
31191 +	switch (channel_type) {
31192 +	case NL80211_CHAN_HT40PLUS:
31193 +		diff = 20;
31194 +	case NL80211_CHAN_HT40MINUS:
31195 +		diff = -20;
31196 +	default:
31197 +		return false;
31198 +	}
31199 +
31200 +	sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
31201 +	if (!sec_chan)
31202 +		return false;
31203 +
31204 +	/* we'll need a DFS capability later */
31205 +	if (sec_chan->flags & (IEEE80211_CHAN_DISABLED |
31206 +			       IEEE80211_CHAN_PASSIVE_SCAN |
31207 +			       IEEE80211_CHAN_NO_IBSS |
31208 +			       IEEE80211_CHAN_RADAR))
31209 +		return false;
31210 +
31211 +	return true;
31212 +}
31213 +
31214  int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
31215  		      struct wireless_dev *wdev, int freq,
31216  		      enum nl80211_channel_type channel_type)
31217 @@ -68,6 +98,27 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
31218  	if (!chan)
31219  		return -EINVAL;
31220  
31221 +	/* Both channels should be able to initiate communication */
31222 +	if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
31223 +		     wdev->iftype == NL80211_IFTYPE_AP ||
31224 +		     wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
31225 +		     wdev->iftype == NL80211_IFTYPE_MESH_POINT)) {
31226 +		switch (channel_type) {
31227 +		case NL80211_CHAN_HT40PLUS:
31228 +		case NL80211_CHAN_HT40MINUS:
31229 +			if (!can_beacon_sec_chan(&rdev->wiphy, chan,
31230 +						 channel_type)) {
31231 +				printk(KERN_DEBUG
31232 +				       "cfg80211: Secondary channel not "
31233 +				       "allowed to initiate communication\n");
31234 +				return -EINVAL;
31235 +			}
31236 +			break;
31237 +		default:
31238 +			break;
31239 +		}
31240 +	}
31241 +
31242  	result = rdev->ops->set_channel(&rdev->wiphy,
31243  					wdev ? wdev->netdev : NULL,
31244  					chan, channel_type);
31245 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
31246 index 37902a5..9a8cde9 100644
31247 --- a/net/wireless/nl80211.c
31248 +++ b/net/wireless/nl80211.c
31249 @@ -761,11 +761,13 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
31250  
31251  	result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev);
31252  	if (result)
31253 -		goto unlock;
31254 +		goto unlock_rtnl;
31255  
31256  	result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
31257  
31258 - unlock:
31259 +	dev_put(netdev);
31260 +	cfg80211_unlock_rdev(rdev);
31261 + unlock_rtnl:
31262  	rtnl_unlock();
31263  
31264  	return result;
31265 @@ -4996,7 +4998,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
31266  
31267  	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
31268  	if (err)
31269 -		goto unlock_rdev;
31270 +		goto unlock_rtnl;
31271  
31272  	wdev = dev->ieee80211_ptr;
31273  
31274 @@ -5013,9 +5015,10 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
31275  	err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
31276  					     threshold, hysteresis);
31277  
31278 -unlock_rdev:
31279 + unlock_rdev:
31280  	cfg80211_unlock_rdev(rdev);
31281  	dev_put(dev);
31282 + unlock_rtnl:
31283  	rtnl_unlock();
31284  
31285  	return err;
31286 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
31287 index f180db0..edccc09 100644
31288 --- a/net/wireless/reg.c
31289 +++ b/net/wireless/reg.c
31290 @@ -723,7 +723,9 @@ EXPORT_SYMBOL(freq_reg_info);
31291   * on the wiphy with the target_bw specified. Then we can simply use
31292   * that below for the desired_bw_khz below.
31293   */
31294 -static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
31295 +static void handle_channel(struct wiphy *wiphy,
31296 +			   enum nl80211_reg_initiator initiator,
31297 +			   enum ieee80211_band band,
31298  			   unsigned int chan_idx)
31299  {
31300  	int r;
31301 @@ -787,7 +789,9 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
31302  		chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
31303  }
31304  
31305 -static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
31306 +static void handle_band(struct wiphy *wiphy,
31307 +			enum ieee80211_band band,
31308 +			enum nl80211_reg_initiator initiator)
31309  {
31310  	unsigned int i;
31311  	struct ieee80211_supported_band *sband;
31312 @@ -796,7 +800,7 @@ static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
31313  	sband = wiphy->bands[band];
31314  
31315  	for (i = 0; i < sband->n_channels; i++)
31316 -		handle_channel(wiphy, band, i);
31317 +		handle_channel(wiphy, initiator, band, i);
31318  }
31319  
31320  static bool ignore_reg_update(struct wiphy *wiphy,
31321 @@ -812,6 +816,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
31322  	 * desired regulatory domain set
31323  	 */
31324  	if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
31325 +	    initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
31326  	    !is_world_regdom(last_request->alpha2))
31327  		return true;
31328  	return false;
31329 @@ -1033,7 +1038,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
31330  		goto out;
31331  	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
31332  		if (wiphy->bands[band])
31333 -			handle_band(wiphy, band);
31334 +			handle_band(wiphy, band, initiator);
31335  	}
31336  out:
31337  	reg_process_beacons(wiphy);
31338 @@ -1170,7 +1175,7 @@ static int ignore_request(struct wiphy *wiphy,
31339  				return 0;
31340  			return -EALREADY;
31341  		}
31342 -		return REG_INTERSECT;
31343 +		return 0;
31344  	case NL80211_REGDOM_SET_BY_DRIVER:
31345  		if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
31346  			if (regdom_changes(pending_request->alpha2))
31347 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
31348 index 5ca8c71..503ebb8 100644
31349 --- a/net/wireless/scan.c
31350 +++ b/net/wireless/scan.c
31351 @@ -650,14 +650,14 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
31352  	bss = container_of(pub, struct cfg80211_internal_bss, pub);
31353  
31354  	spin_lock_bh(&dev->bss_lock);
31355 +	if (!list_empty(&bss->list)) {
31356 +		list_del_init(&bss->list);
31357 +		dev->bss_generation++;
31358 +		rb_erase(&bss->rbn, &dev->bss_tree);
31359  
31360 -	list_del(&bss->list);
31361 -	dev->bss_generation++;
31362 -	rb_erase(&bss->rbn, &dev->bss_tree);
31363 -
31364 +		kref_put(&bss->ref, bss_release);
31365 +	}
31366  	spin_unlock_bh(&dev->bss_lock);
31367 -
31368 -	kref_put(&bss->ref, bss_release);
31369  }
31370  EXPORT_SYMBOL(cfg80211_unlink_bss);
31371  
31372 diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
31373 index 771bab0..55187c8 100644
31374 --- a/net/x25/x25_facilities.c
31375 +++ b/net/x25/x25_facilities.c
31376 @@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
31377  	while (len > 0) {
31378  		switch (*p & X25_FAC_CLASS_MASK) {
31379  		case X25_FAC_CLASS_A:
31380 +			if (len < 2)
31381 +				return 0;
31382  			switch (*p) {
31383  			case X25_FAC_REVERSE:
31384  				if((p[1] & 0x81) == 0x81) {
31385 @@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
31386  			len -= 2;
31387  			break;
31388  		case X25_FAC_CLASS_B:
31389 +			if (len < 3)
31390 +				return 0;
31391  			switch (*p) {
31392  			case X25_FAC_PACKET_SIZE:
31393  				facilities->pacsize_in  = p[1];
31394 @@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
31395  			len -= 3;
31396  			break;
31397  		case X25_FAC_CLASS_C:
31398 +			if (len < 4)
31399 +				return 0;
31400  			printk(KERN_DEBUG "X.25: unknown facility %02X, "
31401  			       "values %02X, %02X, %02X\n",
31402  			       p[0], p[1], p[2], p[3]);
31403 @@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
31404  			len -= 4;
31405  			break;
31406  		case X25_FAC_CLASS_D:
31407 +			if (len < p[1] + 2)
31408 +				return 0;
31409  			switch (*p) {
31410  			case X25_FAC_CALLING_AE:
31411 -				if (p[1] > X25_MAX_DTE_FACIL_LEN)
31412 -					break;
31413 +				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
31414 +					return 0;
31415  				dte_facs->calling_len = p[2];
31416  				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
31417  				*vc_fac_mask |= X25_MASK_CALLING_AE;
31418  				break;
31419  			case X25_FAC_CALLED_AE:
31420 -				if (p[1] > X25_MAX_DTE_FACIL_LEN)
31421 -					break;
31422 +				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
31423 +					return 0;
31424  				dte_facs->called_len = p[2];
31425  				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
31426  				*vc_fac_mask |= X25_MASK_CALLED_AE;
31427  				break;
31428  			default:
31429  				printk(KERN_DEBUG "X.25: unknown facility %02X,"
31430 -					"length %d, values %02X, %02X, "
31431 -					"%02X, %02X\n",
31432 -					p[0], p[1], p[2], p[3], p[4], p[5]);
31433 +					"length %d\n", p[0], p[1]);
31434  				break;
31435  			}
31436  			len -= p[1] + 2;
31437 diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
31438 index 6317896..f729f02 100644
31439 --- a/net/x25/x25_in.c
31440 +++ b/net/x25/x25_in.c
31441 @@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
31442  						&x25->vc_facil_mask);
31443  			if (len > 0)
31444  				skb_pull(skb, len);
31445 +			else
31446 +				return -1;
31447  			/*
31448  			 *	Copy any Call User Data.
31449  			 */
31450 diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
31451 index 73e7b95..b25c646 100644
31452 --- a/net/x25/x25_link.c
31453 +++ b/net/x25/x25_link.c
31454 @@ -394,6 +394,7 @@ void __exit x25_link_free(void)
31455  	list_for_each_safe(entry, tmp, &x25_neigh_list) {
31456  		nb = list_entry(entry, struct x25_neigh, node);
31457  		__x25_remove_neigh(nb);
31458 +		dev_put(nb->dev);
31459  	}
31460  	write_unlock_bh(&x25_neigh_list_lock);
31461  }
31462 diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
31463 index 7ef429c..6968f5b 100644
31464 --- a/scripts/kconfig/conf.c
31465 +++ b/scripts/kconfig/conf.c
31466 @@ -466,7 +466,7 @@ int main(int ac, char **av)
31467  	bindtextdomain(PACKAGE, LOCALEDIR);
31468  	textdomain(PACKAGE);
31469  
31470 -	while ((opt = getopt_long_only(ac, av, "", long_opts, NULL)) != -1) {
31471 +	while ((opt = getopt_long(ac, av, "", long_opts, NULL)) != -1) {
31472  		input_mode = (enum input_mode)opt;
31473  		switch (opt) {
31474  		case silentoldconfig:
31475 diff --git a/security/apparmor/rlim_names.h b/security/apparmor/rlim_names.h
31476 new file mode 100644
31477 index 0000000..32e965f
31478 --- /dev/null
31479 +++ b/security/apparmor/rlim_names.h
31480 @@ -0,0 +1,36 @@
31481 +static const char *rlim_names[] = {
31482 +[0]  = "cpu",
31483 +[1]  = "fsize",
31484 +[2]  = "data",
31485 +[3]  = "stack",
31486 +[4]  = "core",
31487 +[5]  = "rss",
31488 +[6]  = "nproc",
31489 +[7]  = "nofile",
31490 +[8]  = "memlock",
31491 +[9]  = "as",
31492 +[10]  = "locks",
31493 +[11]  = "sigpending",
31494 +[12]  = "msgqueue",
31495 +[13]  = "nice",
31496 +[14]  = "rtprio",
31497 +[15]  = "rttime",
31498 +};
31499 +static const int rlim_map[] = {
31500 +RLIMIT_CPU,
31501 +RLIMIT_FSIZE,
31502 +RLIMIT_DATA,
31503 +RLIMIT_STACK,
31504 +RLIMIT_CORE,
31505 +RLIMIT_RSS,
31506 +RLIMIT_NPROC,
31507 +RLIMIT_NOFILE,
31508 +RLIMIT_MEMLOCK,
31509 +RLIMIT_AS,
31510 +RLIMIT_LOCKS,
31511 +RLIMIT_SIGPENDING,
31512 +RLIMIT_MSGQUEUE,
31513 +RLIMIT_NICE,
31514 +RLIMIT_RTPRIO,
31515 +RLIMIT_RTTIME,
31516 +};
31517 diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
31518 index aef8c0a..d661afb 100644
31519 --- a/security/integrity/ima/ima_policy.c
31520 +++ b/security/integrity/ima/ima_policy.c
31521 @@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
31522  	result = security_filter_rule_init(entry->lsm[lsm_rule].type,
31523  					   Audit_equal, args,
31524  					   &entry->lsm[lsm_rule].rule);
31525 +	if (!entry->lsm[lsm_rule].rule)
31526 +		return -EINVAL;
31527  	return result;
31528  }
31529  
31530 diff --git a/security/keys/request_key.c b/security/keys/request_key.c
31531 index 0088dd8..0ea52d2 100644
31532 --- a/security/keys/request_key.c
31533 +++ b/security/keys/request_key.c
31534 @@ -403,7 +403,6 @@ link_check_failed:
31535  	return ret;
31536  
31537  link_prealloc_failed:
31538 -	up_write(&dest_keyring->sem);
31539  	mutex_unlock(&user->cons_lock);
31540  	kleave(" = %d [prelink]", ret);
31541  	return ret;
31542 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
31543 index 4796ddd..a623828 100644
31544 --- a/security/selinux/hooks.c
31545 +++ b/security/selinux/hooks.c
31546 @@ -2529,7 +2529,10 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
31547  	sid = tsec->sid;
31548  	newsid = tsec->create_sid;
31549  
31550 -	if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
31551 +	if ((sbsec->flags & SE_SBINITIALIZED) &&
31552 +	    (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
31553 +		newsid = sbsec->mntpoint_sid;
31554 +	else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
31555  		rc = security_transition_sid(sid, dsec->sid,
31556  					     inode_mode_to_security_class(inode->i_mode),
31557  					     &newsid);
31558 diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
31559 index 75ec0c6..8b02b21 100644
31560 --- a/security/selinux/nlmsgtab.c
31561 +++ b/security/selinux/nlmsgtab.c
31562 @@ -65,6 +65,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
31563  	{ RTM_NEWADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
31564  	{ RTM_DELADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
31565  	{ RTM_GETADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
31566 +	{ RTM_GETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
31567 +	{ RTM_SETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
31568  };
31569  
31570  static struct nlmsg_perm nlmsg_firewall_perms[] =
31571 diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
31572 index 10c3a87..b310702 100644
31573 --- a/sound/atmel/ac97c.c
31574 +++ b/sound/atmel/ac97c.c
31575 @@ -33,9 +33,12 @@
31576  #include <linux/dw_dmac.h>
31577  
31578  #include <mach/cpu.h>
31579 -#include <mach/hardware.h>
31580  #include <mach/gpio.h>
31581  
31582 +#ifdef CONFIG_ARCH_AT91
31583 +#include <mach/hardware.h>
31584 +#endif
31585 +
31586  #include "ac97c.h"
31587  
31588  enum {
31589 diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
31590 index 7730575..07efa29 100644
31591 --- a/sound/core/hrtimer.c
31592 +++ b/sound/core/hrtimer.c
31593 @@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
31594  {
31595  	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
31596  	struct snd_timer *t = stime->timer;
31597 +	unsigned long oruns;
31598  
31599  	if (!atomic_read(&stime->running))
31600  		return HRTIMER_NORESTART;
31601  
31602 -	hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
31603 -	snd_timer_interrupt(stime->timer, t->sticks);
31604 +	oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
31605 +	snd_timer_interrupt(stime->timer, t->sticks * oruns);
31606  
31607  	if (!atomic_read(&stime->running))
31608  		return HRTIMER_NORESTART;
31609 diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
31610 index f50ebf2..8442a08 100644
31611 --- a/sound/core/oss/mixer_oss.c
31612 +++ b/sound/core/oss/mixer_oss.c
31613 @@ -618,8 +618,10 @@ static void snd_mixer_oss_put_volume1_vol(struct snd_mixer_oss_file *fmixer,
31614  	if (numid == ID_UNKNOWN)
31615  		return;
31616  	down_read(&card->controls_rwsem);
31617 -	if ((kctl = snd_ctl_find_numid(card, numid)) == NULL)
31618 +	if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
31619 +		up_read(&card->controls_rwsem);
31620  		return;
31621 +	}
31622  	uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL);
31623  	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
31624  	if (uinfo == NULL || uctl == NULL)
31625 @@ -658,7 +660,7 @@ static void snd_mixer_oss_put_volume1_sw(struct snd_mixer_oss_file *fmixer,
31626  		return;
31627  	down_read(&card->controls_rwsem);
31628  	if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) {
31629 -		up_read(&fmixer->card->controls_rwsem);
31630 +		up_read(&card->controls_rwsem);
31631  		return;
31632  	}
31633  	uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL);
31634 @@ -797,7 +799,7 @@ static int snd_mixer_oss_get_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned
31635  	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
31636  	if (uinfo == NULL || uctl == NULL) {
31637  		err = -ENOMEM;
31638 -		goto __unlock;
31639 +		goto __free_only;
31640  	}
31641  	down_read(&card->controls_rwsem);
31642  	kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
31643 @@ -826,6 +828,7 @@ static int snd_mixer_oss_get_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned
31644  	err = 0;
31645        __unlock:
31646       	up_read(&card->controls_rwsem);
31647 +      __free_only:
31648        	kfree(uctl);
31649        	kfree(uinfo);
31650        	return err;
31651 @@ -847,7 +850,7 @@ static int snd_mixer_oss_put_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned
31652  	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
31653  	if (uinfo == NULL || uctl == NULL) {
31654  		err = -ENOMEM;
31655 -		goto __unlock;
31656 +		goto __free_only;
31657  	}
31658  	down_read(&card->controls_rwsem);
31659  	kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0);
31660 @@ -880,6 +883,7 @@ static int snd_mixer_oss_put_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned
31661  	err = 0;
31662        __unlock:
31663  	up_read(&card->controls_rwsem);
31664 +      __free_only:
31665  	kfree(uctl);
31666  	kfree(uinfo);
31667  	return err;
31668 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
31669 index 5c8c7df..aed06c9 100644
31670 --- a/sound/core/oss/pcm_oss.c
31671 +++ b/sound/core/oss/pcm_oss.c
31672 @@ -1510,16 +1510,19 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
31673  static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
31674  {
31675  	struct snd_pcm_substream *substream;
31676 +	struct snd_pcm_runtime *runtime;
31677 +	int i;
31678  
31679 -	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
31680 -	if (substream != NULL) {
31681 -		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
31682 -		substream->runtime->oss.prepare = 1;
31683 -	}
31684 -	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
31685 -	if (substream != NULL) {
31686 +	for (i = 0; i < 2; i++) {
31687 +		substream = pcm_oss_file->streams[i];
31688 +		if (!substream)
31689 +			continue;
31690 +		runtime = substream->runtime;
31691  		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
31692 -		substream->runtime->oss.prepare = 1;
31693 +		runtime->oss.prepare = 1;
31694 +		runtime->oss.buffer_used = 0;
31695 +		runtime->oss.prev_hw_ptr_period = 0;
31696 +		runtime->oss.period_ptr = 0;
31697  	}
31698  	return 0;
31699  }
31700 diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
31701 index 07f803e..3f420ff 100644
31702 --- a/sound/oss/soundcard.c
31703 +++ b/sound/oss/soundcard.c
31704 @@ -86,7 +86,7 @@ int *load_mixer_volumes(char *name, int *levels, int present)
31705  	int             i, n;
31706  
31707  	for (i = 0; i < num_mixer_volumes; i++) {
31708 -		if (strcmp(name, mixer_vols[i].name) == 0) {
31709 +		if (strncmp(name, mixer_vols[i].name, 32) == 0) {
31710  			if (present)
31711  				mixer_vols[i].num = i;
31712  			return mixer_vols[i].levels;
31713 @@ -98,7 +98,7 @@ int *load_mixer_volumes(char *name, int *levels, int present)
31714  	}
31715  	n = num_mixer_volumes++;
31716  
31717 -	strcpy(mixer_vols[n].name, name);
31718 +	strncpy(mixer_vols[n].name, name, 32);
31719  
31720  	if (present)
31721  		mixer_vols[n].num = n;
31722 diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
31723 index b9d2f20..5439d66 100644
31724 --- a/sound/pci/au88x0/au88x0_pcm.c
31725 +++ b/sound/pci/au88x0/au88x0_pcm.c
31726 @@ -42,11 +42,7 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = {
31727  	.rate_min = 5000,
31728  	.rate_max = 48000,
31729  	.channels_min = 1,
31730 -#ifdef CHIP_AU8830
31731 -	.channels_max = 4,
31732 -#else
31733  	.channels_max = 2,
31734 -#endif
31735  	.buffer_bytes_max = 0x10000,
31736  	.period_bytes_min = 0x1,
31737  	.period_bytes_max = 0x1000,
31738 @@ -115,6 +111,17 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_wt = {
31739  	.periods_max = 64,
31740  };
31741  #endif
31742 +#ifdef CHIP_AU8830
31743 +static unsigned int au8830_channels[3] = {
31744 +	1, 2, 4,
31745 +};
31746 +
31747 +static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
31748 +	.count = ARRAY_SIZE(au8830_channels),
31749 +	.list = au8830_channels,
31750 +	.mask = 0,
31751 +};
31752 +#endif
31753  /* open callback */
31754  static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
31755  {
31756 @@ -156,6 +163,15 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
31757  		if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB
31758  		    || VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S)
31759  			runtime->hw = snd_vortex_playback_hw_adb;
31760 +#ifdef CHIP_AU8830
31761 +		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
31762 +			VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
31763 +			runtime->hw.channels_max = 4;
31764 +			snd_pcm_hw_constraint_list(runtime, 0,
31765 +				SNDRV_PCM_HW_PARAM_CHANNELS,
31766 +				&hw_constraints_au8830_channels);
31767 +		}
31768 +#endif
31769  		substream->runtime->private_data = NULL;
31770  	}
31771  #ifndef CHIP_AU8810
31772 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
31773 index 1482921..b959c9f 100644
31774 --- a/sound/pci/hda/hda_codec.c
31775 +++ b/sound/pci/hda/hda_codec.c
31776 @@ -1216,6 +1216,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
31777  	struct hda_codec *c;
31778  	struct hda_cvt_setup *p;
31779  	unsigned int oldval, newval;
31780 +	int type;
31781  	int i;
31782  
31783  	if (!nid)
31784 @@ -1254,10 +1255,12 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
31785  	p->dirty = 0;
31786  
31787  	/* make other inactive cvts with the same stream-tag dirty */
31788 +	type = get_wcaps_type(get_wcaps(codec, nid));
31789  	list_for_each_entry(c, &codec->bus->codec_list, list) {
31790  		for (i = 0; i < c->cvt_setups.used; i++) {
31791  			p = snd_array_elem(&c->cvt_setups, i);
31792 -			if (!p->active && p->stream_tag == stream_tag)
31793 +			if (!p->active && p->stream_tag == stream_tag &&
31794 +			    get_wcaps_type(get_wcaps(codec, p->nid)) == type)
31795  				p->dirty = 1;
31796  		}
31797  	}
31798 @@ -1281,6 +1284,9 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
31799  	if (!nid)
31800  		return;
31801  
31802 +	if (codec->no_sticky_stream)
31803 +		do_now = 1;
31804 +
31805  	snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
31806  	p = get_hda_cvt_setup(codec, nid);
31807  	if (p) {
31808 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
31809 index 62c7022..c3ad374 100644
31810 --- a/sound/pci/hda/hda_codec.h
31811 +++ b/sound/pci/hda/hda_codec.h
31812 @@ -850,6 +850,7 @@ struct hda_codec {
31813  	unsigned int pin_amp_workaround:1; /* pin out-amp takes index
31814  					    * (e.g. Conexant codecs)
31815  					    */
31816 +	unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
31817  	unsigned int pins_shutup:1;	/* pins are shut up */
31818  	unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
31819  #ifdef CONFIG_SND_HDA_POWER_SAVE
31820 diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
31821 index 26c3ade..3c17a0a 100644
31822 --- a/sound/pci/hda/hda_eld.c
31823 +++ b/sound/pci/hda/hda_eld.c
31824 @@ -381,7 +381,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a)
31825  	snd_print_pcm_rates(a->rates, buf, sizeof(buf));
31826  
31827  	if (a->format == AUDIO_CODING_TYPE_LPCM)
31828 -		snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
31829 +		snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
31830  	else if (a->max_bitrate)
31831  		snprintf(buf2, sizeof(buf2),
31832  				", max bitrate = %d", a->max_bitrate);
31833 @@ -604,24 +604,19 @@ void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
31834  {
31835  	int i;
31836  
31837 -	pcm->rates = 0;
31838 -	pcm->formats = 0;
31839 -	pcm->maxbps = 0;
31840 -	pcm->channels_min = -1;
31841 -	pcm->channels_max = 0;
31842 +	/* assume basic audio support (the basic audio flag is not in ELD;
31843 +	 * however, all audio capable sinks are required to support basic
31844 +	 * audio) */
31845 +	pcm->rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
31846 +	pcm->formats = SNDRV_PCM_FMTBIT_S16_LE;
31847 +	pcm->maxbps = 16;
31848 +	pcm->channels_max = 2;
31849  	for (i = 0; i < eld->sad_count; i++) {
31850  		struct cea_sad *a = &eld->sad[i];
31851  		pcm->rates |= a->rates;
31852 -		if (a->channels < pcm->channels_min)
31853 -			pcm->channels_min = a->channels;
31854  		if (a->channels > pcm->channels_max)
31855  			pcm->channels_max = a->channels;
31856  		if (a->format == AUDIO_CODING_TYPE_LPCM) {
31857 -			if (a->sample_bits & AC_SUPPCM_BITS_16) {
31858 -				pcm->formats |= SNDRV_PCM_FMTBIT_S16_LE;
31859 -				if (pcm->maxbps < 16)
31860 -					pcm->maxbps = 16;
31861 -			}
31862  			if (a->sample_bits & AC_SUPPCM_BITS_20) {
31863  				pcm->formats |= SNDRV_PCM_FMTBIT_S32_LE;
31864  				if (pcm->maxbps < 20)
31865 @@ -641,7 +636,6 @@ void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
31866  	/* restrict the parameters by the values the codec provides */
31867  	pcm->rates &= codec_pars->rates;
31868  	pcm->formats &= codec_pars->formats;
31869 -	pcm->channels_min = max(pcm->channels_min, codec_pars->channels_min);
31870  	pcm->channels_max = min(pcm->channels_max, codec_pars->channels_max);
31871  	pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps);
31872  }
31873 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
31874 index 34940a0..a8c6f34 100644
31875 --- a/sound/pci/hda/hda_intel.c
31876 +++ b/sound/pci/hda/hda_intel.c
31877 @@ -458,6 +458,7 @@ enum {
31878  	AZX_DRIVER_ULI,
31879  	AZX_DRIVER_NVIDIA,
31880  	AZX_DRIVER_TERA,
31881 +	AZX_DRIVER_CTX,
31882  	AZX_DRIVER_GENERIC,
31883  	AZX_NUM_DRIVERS, /* keep this as last entry */
31884  };
31885 @@ -473,6 +474,7 @@ static char *driver_short_names[] __devinitdata = {
31886  	[AZX_DRIVER_ULI] = "HDA ULI M5461",
31887  	[AZX_DRIVER_NVIDIA] = "HDA NVidia",
31888  	[AZX_DRIVER_TERA] = "HDA Teradici", 
31889 +	[AZX_DRIVER_CTX] = "HDA Creative",
31890  	[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
31891  };
31892  
31893 @@ -563,7 +565,10 @@ static void azx_init_cmd_io(struct azx *chip)
31894  	/* reset the rirb hw write pointer */
31895  	azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
31896  	/* set N=1, get RIRB response interrupt for new entry */
31897 -	azx_writew(chip, RINTCNT, 1);
31898 +	if (chip->driver_type == AZX_DRIVER_CTX)
31899 +		azx_writew(chip, RINTCNT, 0xc0);
31900 +	else
31901 +		azx_writew(chip, RINTCNT, 1);
31902  	/* enable rirb dma and response irq */
31903  	azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
31904  	spin_unlock_irq(&chip->reg_lock);
31905 @@ -1136,8 +1141,11 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
31906  	/* clear rirb int */
31907  	status = azx_readb(chip, RIRBSTS);
31908  	if (status & RIRB_INT_MASK) {
31909 -		if (status & RIRB_INT_RESPONSE)
31910 +		if (status & RIRB_INT_RESPONSE) {
31911 +			if (chip->driver_type == AZX_DRIVER_CTX)
31912 +				udelay(80);
31913  			azx_update_rirb(chip);
31914 +		}
31915  		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
31916  	}
31917  
31918 @@ -1647,7 +1655,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
31919  	struct azx_dev *azx_dev = get_azx_dev(substream);
31920  	struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
31921  	struct snd_pcm_runtime *runtime = substream->runtime;
31922 -	unsigned int bufsize, period_bytes, format_val;
31923 +	unsigned int bufsize, period_bytes, format_val, stream_tag;
31924  	int err;
31925  
31926  	azx_stream_reset(chip, azx_dev);
31927 @@ -1689,7 +1697,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
31928  	else
31929  		azx_dev->fifo_size = 0;
31930  
31931 -	return snd_hda_codec_prepare(apcm->codec, hinfo, azx_dev->stream_tag,
31932 +	stream_tag = azx_dev->stream_tag;
31933 +	/* CA-IBG chips need the playback stream starting from 1 */
31934 +	if (chip->driver_type == AZX_DRIVER_CTX &&
31935 +	    stream_tag > chip->capture_streams)
31936 +		stream_tag -= chip->capture_streams;
31937 +	return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
31938  				     azx_dev->format_val, substream);
31939  }
31940  
31941 @@ -2285,9 +2298,11 @@ static int azx_dev_free(struct snd_device *device)
31942   */
31943  static struct snd_pci_quirk position_fix_list[] __devinitdata = {
31944  	SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
31945 +	SND_PCI_QUIRK(0x1025, 0x026f, "Acer Aspire 5538", POS_FIX_LPIB),
31946  	SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
31947  	SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
31948  	SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
31949 +	SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB),
31950  	SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
31951  	SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
31952  	SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
31953 @@ -2794,10 +2809,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
31954  	{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
31955  	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
31956  	  .class_mask = 0xffffff,
31957 -	  .driver_data = AZX_DRIVER_GENERIC },
31958 +	  .driver_data = AZX_DRIVER_CTX },
31959  #else
31960  	/* this entry seems still valid -- i.e. without emu20kx chip */
31961 -	{ PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC },
31962 +	{ PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX },
31963  #endif
31964  	/* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */
31965  	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
31966 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
31967 index 10bbbaf..0da636d 100644
31968 --- a/sound/pci/hda/patch_analog.c
31969 +++ b/sound/pci/hda/patch_analog.c
31970 @@ -1276,6 +1276,7 @@ static int patch_ad1986a(struct hda_codec *codec)
31971  	spec->multiout.no_share_stream = 1;
31972  
31973  	codec->no_trigger_sense = 1;
31974 +	codec->no_sticky_stream = 1;
31975  
31976  	return 0;
31977  }
31978 @@ -1463,6 +1464,7 @@ static int patch_ad1983(struct hda_codec *codec)
31979  	codec->patch_ops = ad198x_patch_ops;
31980  
31981  	codec->no_trigger_sense = 1;
31982 +	codec->no_sticky_stream = 1;
31983  
31984  	return 0;
31985  }
31986 @@ -1917,6 +1919,7 @@ static int patch_ad1981(struct hda_codec *codec)
31987  	}
31988  
31989  	codec->no_trigger_sense = 1;
31990 +	codec->no_sticky_stream = 1;
31991  
31992  	return 0;
31993  }
31994 @@ -3235,6 +3238,7 @@ static int patch_ad1988(struct hda_codec *codec)
31995  	spec->vmaster_nid = 0x04;
31996  
31997  	codec->no_trigger_sense = 1;
31998 +	codec->no_sticky_stream = 1;
31999  
32000  	return 0;
32001  }
32002 @@ -3449,6 +3453,7 @@ static int patch_ad1884(struct hda_codec *codec)
32003  	codec->patch_ops = ad198x_patch_ops;
32004  
32005  	codec->no_trigger_sense = 1;
32006 +	codec->no_sticky_stream = 1;
32007  
32008  	return 0;
32009  }
32010 @@ -4422,6 +4427,7 @@ static int patch_ad1884a(struct hda_codec *codec)
32011  	}
32012  
32013  	codec->no_trigger_sense = 1;
32014 +	codec->no_sticky_stream = 1;
32015  
32016  	return 0;
32017  }
32018 @@ -4761,6 +4767,7 @@ static int patch_ad1882(struct hda_codec *codec)
32019  	}
32020  
32021  	codec->no_trigger_sense = 1;
32022 +	codec->no_sticky_stream = 1;
32023  
32024  	return 0;
32025  }
32026 diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
32027 index af47801..9544463 100644
32028 --- a/sound/pci/hda/patch_ca0110.c
32029 +++ b/sound/pci/hda/patch_ca0110.c
32030 @@ -489,7 +489,7 @@ static void parse_digital(struct hda_codec *codec)
32031  	if (cfg->dig_outs &&
32032  	    snd_hda_get_connections(codec, cfg->dig_out_pins[0],
32033  				    &spec->dig_out, 1) == 1)
32034 -		spec->multiout.dig_out_nid = cfg->dig_out_pins[0];
32035 +		spec->multiout.dig_out_nid = spec->dig_out;
32036  }
32037  
32038  static int ca0110_parse_auto_config(struct hda_codec *codec)
32039 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
32040 index 972e7c4..5dbff45 100644
32041 --- a/sound/pci/hda/patch_conexant.c
32042 +++ b/sound/pci/hda/patch_conexant.c
32043 @@ -393,10 +393,16 @@ static int conexant_add_jack(struct hda_codec *codec,
32044  	struct conexant_spec *spec;
32045  	struct conexant_jack *jack;
32046  	const char *name;
32047 -	int err;
32048 +	int i, err;
32049  
32050  	spec = codec->spec;
32051  	snd_array_init(&spec->jacks, sizeof(*jack), 32);
32052 +
32053 +	jack = spec->jacks.list;
32054 +	for (i = 0; i < spec->jacks.used; i++, jack++)
32055 +		if (jack->nid == nid)
32056 +			return 0 ; /* already present */
32057 +
32058  	jack = snd_array_new(&spec->jacks);
32059  	name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
32060  
32061 @@ -3085,13 +3091,13 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
32062  static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
32063  	SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
32064  		      CXT5066_LAPTOP),
32065 -	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
32066 -		      CXT5066_DELL_LAPTOP),
32067 +	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
32068  	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
32069  	SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
32070  	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
32071  	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
32072  	SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
32073 +	SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
32074  	SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
32075  	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
32076  	SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
32077 @@ -3099,6 +3105,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
32078  	SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
32079  	SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
32080  	SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
32081 +	SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
32082   	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
32083   	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
32084  	SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
32085 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
32086 index afd6022..4ab7c5c 100644
32087 --- a/sound/pci/hda/patch_hdmi.c
32088 +++ b/sound/pci/hda/patch_hdmi.c
32089 @@ -779,7 +779,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
32090  			return -ENODEV;
32091  	} else {
32092  		/* fallback to the codec default */
32093 -		hinfo->channels_min = codec_pars->channels_min;
32094  		hinfo->channels_max = codec_pars->channels_max;
32095  		hinfo->rates = codec_pars->rates;
32096  		hinfo->formats = codec_pars->formats;
32097 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
32098 index a432e6e..52b07fb 100644
32099 --- a/sound/pci/hda/patch_realtek.c
32100 +++ b/sound/pci/hda/patch_realtek.c
32101 @@ -1438,6 +1438,7 @@ do_sku:
32102  		spec->init_amp = ALC_INIT_GPIO3;
32103  		break;
32104  	case 5:
32105 +	default:
32106  		spec->init_amp = ALC_INIT_DEFAULT;
32107  		break;
32108  	}
32109 @@ -4388,6 +4389,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
32110  	SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
32111  	SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
32112  	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
32113 +	SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_LG),
32114  	SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_LG),
32115  	SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_LG_LW),
32116  	SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_TCL_S700),
32117 @@ -9664,7 +9666,6 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
32118  	SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763),
32119  	SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763),
32120  	SND_PCI_QUIRK(0x17aa, 0x101d, "Lenovo Sky", ALC888_LENOVO_SKY),
32121 -	SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2),
32122  	SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
32123  	SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
32124  	SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
32125 @@ -16557,7 +16558,7 @@ static struct alc_config_preset alc861vd_presets[] = {
32126  static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
32127  						const struct auto_pin_cfg *cfg)
32128  {
32129 -	return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
32130 +	return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x22, 0);
32131  }
32132  
32133  
32134 @@ -18612,6 +18613,8 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
32135  		return 0x02;
32136  	else if (nid >= 0x0c && nid <= 0x0e)
32137  		return nid - 0x0c + 0x02;
32138 +	else if (nid == 0x26) /* ALC887-VD has this DAC too */
32139 +		return 0x25;
32140  	else
32141  		return 0;
32142  }
32143 @@ -18620,7 +18623,7 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
32144  static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
32145  				   hda_nid_t dac)
32146  {
32147 -	hda_nid_t mix[4];
32148 +	hda_nid_t mix[5];
32149  	int i, num;
32150  
32151  	num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
32152 @@ -18935,6 +18938,37 @@ static void alc662_auto_init(struct hda_codec *codec)
32153  		alc_inithook(codec);
32154  }
32155  
32156 +enum {
32157 +	ALC662_FIXUP_ASPIRE,
32158 +	ALC662_FIXUP_IDEAPAD,
32159 +};
32160 +
32161 +static const struct alc_fixup alc662_fixups[] = {
32162 +	[ALC662_FIXUP_ASPIRE] = {
32163 +		.pins = (const struct alc_pincfg[]) {
32164 +			{ 0x15, 0x99130112 }, /* subwoofer */
32165 +			{ }
32166 +		}
32167 +	},
32168 +	[ALC662_FIXUP_IDEAPAD] = {
32169 +		.pins = (const struct alc_pincfg[]) {
32170 +			{ 0x17, 0x99130112 }, /* subwoofer */
32171 +			{ }
32172 +		}
32173 +	},
32174 +};
32175 +
32176 +static struct snd_pci_quirk alc662_fixup_tbl[] = {
32177 +	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
32178 +	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
32179 +	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
32180 +	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
32181 +	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
32182 +	{}
32183 +};
32184 +
32185 +
32186 +
32187  static int patch_alc662(struct hda_codec *codec)
32188  {
32189  	struct alc_spec *spec;
32190 @@ -18967,6 +19001,7 @@ static int patch_alc662(struct hda_codec *codec)
32191  	}
32192  
32193  	if (board_config == ALC662_AUTO) {
32194 +		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 1);
32195  		/* automatic parse from the BIOS config */
32196  		err = alc662_parse_auto_config(codec);
32197  		if (err < 0) {
32198 @@ -19025,8 +19060,11 @@ static int patch_alc662(struct hda_codec *codec)
32199  	spec->vmaster_nid = 0x02;
32200  
32201  	codec->patch_ops = alc_patch_ops;
32202 -	if (board_config == ALC662_AUTO)
32203 +	if (board_config == ALC662_AUTO) {
32204  		spec->init_hook = alc662_auto_init;
32205 +		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0);
32206 +	}
32207 +
32208  #ifdef CONFIG_SND_HDA_POWER_SAVE
32209  	if (!spec->loopback.amplist)
32210  		spec->loopback.amplist = alc662_loopbacks;
32211 @@ -19039,7 +19077,10 @@ static int patch_alc888(struct hda_codec *codec)
32212  {
32213  	if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){
32214  		kfree(codec->chip_name);
32215 -		codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
32216 +		if (codec->vendor_id == 0x10ec0887)
32217 +			codec->chip_name = kstrdup("ALC887-VD", GFP_KERNEL);
32218 +		else
32219 +			codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
32220  		if (!codec->chip_name) {
32221  			alc_free(codec);
32222  			return -ENOMEM;
32223 @@ -19521,7 +19562,7 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
32224  	{ .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
32225  	  .patch = patch_alc882 },
32226  	{ .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
32227 -	{ .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
32228 +	{ .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc888 },
32229  	{ .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
32230  	  .patch = patch_alc882 },
32231  	{ .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 },
32232 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
32233 index c16c5ba..78f1206 100644
32234 --- a/sound/pci/hda/patch_sigmatel.c
32235 +++ b/sound/pci/hda/patch_sigmatel.c
32236 @@ -93,6 +93,7 @@ enum {
32237  	STAC_92HD83XXX_REF,
32238  	STAC_92HD83XXX_PWR_REF,
32239  	STAC_DELL_S14,
32240 +	STAC_DELL_E6410,
32241  	STAC_92HD83XXX_HP,
32242  	STAC_HP_DV7_4000,
32243  	STAC_92HD83XXX_MODELS
32244 @@ -1618,6 +1619,8 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
32245  static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
32246  	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
32247  		      "Alienware M17x", STAC_ALIENWARE_M17X),
32248 +	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
32249 +		      "Alienware M17x", STAC_ALIENWARE_M17X),
32250  	{} /* terminator */
32251  };
32252  
32253 @@ -1633,6 +1636,13 @@ static unsigned int dell_s14_pin_configs[10] = {
32254  	0x40f000f0, 0x40f000f0,
32255  };
32256  
32257 +/* Deliberately turn off 0x0f (Dock Mic) to make it choose Int Mic instead */
32258 +static unsigned int dell_e6410_pin_configs[10] = {
32259 +	0x04a11020, 0x0421101f, 0x400000f0, 0x90170110,
32260 +	0x23011050, 0x40f000f0, 0x400000f0, 0x90a60130,
32261 +	0x40f000f0, 0x40f000f0,
32262 +};
32263 +
32264  static unsigned int hp_dv7_4000_pin_configs[10] = {
32265  	0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
32266  	0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
32267 @@ -1643,6 +1653,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
32268  	[STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
32269  	[STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
32270  	[STAC_DELL_S14] = dell_s14_pin_configs,
32271 +	[STAC_DELL_E6410] = dell_e6410_pin_configs,
32272  	[STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
32273  };
32274  
32275 @@ -1651,6 +1662,7 @@ static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
32276  	[STAC_92HD83XXX_REF] = "ref",
32277  	[STAC_92HD83XXX_PWR_REF] = "mic-ref",
32278  	[STAC_DELL_S14] = "dell-s14",
32279 +	[STAC_DELL_E6410] = "dell-e6410",
32280  	[STAC_92HD83XXX_HP] = "hp",
32281  	[STAC_HP_DV7_4000] = "hp-dv7-4000",
32282  };
32283 @@ -1663,6 +1675,10 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
32284  		      "DFI LanParty", STAC_92HD83XXX_REF),
32285  	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
32286  		      "unknown Dell", STAC_DELL_S14),
32287 +	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x040a,
32288 +		      "Dell E6410", STAC_DELL_E6410),
32289 +	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x040b,
32290 +		      "Dell E6510", STAC_DELL_E6410),
32291  	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
32292  		      "HP", STAC_92HD83XXX_HP),
32293  	{} /* terminator */
32294 diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
32295 index 4677492..ebfa1f8 100644
32296 --- a/sound/pci/intel8x0.c
32297 +++ b/sound/pci/intel8x0.c
32298 @@ -1866,6 +1866,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
32299  	},
32300  	{
32301  		.subvendor = 0x1028,
32302 +		.subdevice = 0x0182,
32303 +		.name = "Dell Latitude D610",	/* STAC9750/51 */
32304 +		.type = AC97_TUNE_HP_ONLY
32305 +	},
32306 +	{
32307 +		.subvendor = 0x1028,
32308  		.subdevice = 0x0186,
32309  		.name = "Dell Latitude D810", /* cf. Malone #41015 */
32310  		.type = AC97_TUNE_HP_MUTE_LED
32311 diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
32312 index c0eba51..d91f2e4 100644
32313 --- a/sound/soc/blackfin/bf5xx-ac97.c
32314 +++ b/sound/soc/blackfin/bf5xx-ac97.c
32315 @@ -260,9 +260,9 @@ static int bf5xx_ac97_suspend(struct snd_soc_dai *dai)
32316  	pr_debug("%s : sport %d\n", __func__, dai->id);
32317  	if (!dai->active)
32318  		return 0;
32319 -	if (dai->capture.active)
32320 +	if (dai->capture_active)
32321  		sport_rx_stop(sport);
32322 -	if (dai->playback.active)
32323 +	if (dai->playback_active)
32324  		sport_tx_stop(sport);
32325  	return 0;
32326  }
32327 diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
32328 index 24c1426..2503778 100644
32329 --- a/sound/soc/blackfin/bf5xx-tdm.c
32330 +++ b/sound/soc/blackfin/bf5xx-tdm.c
32331 @@ -210,7 +210,7 @@ static int bf5xx_tdm_set_channel_map(struct snd_soc_dai *dai,
32332  #ifdef CONFIG_PM
32333  static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
32334  {
32335 -	struct sport_device *sport = dai->private_data;
32336 +	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
32337  
32338  	if (!dai->active)
32339  		return 0;
32340 diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
32341 index 72deeab..a961fc6 100644
32342 --- a/sound/soc/codecs/wm8580.c
32343 +++ b/sound/soc/codecs/wm8580.c
32344 @@ -171,7 +171,7 @@
32345  static const u16 wm8580_reg[] = {
32346  	0x0121, 0x017e, 0x007d, 0x0014, /*R3*/
32347  	0x0121, 0x017e, 0x007d, 0x0194, /*R7*/
32348 -	0x001c, 0x0002, 0x0002, 0x00c2, /*R11*/
32349 +	0x0010, 0x0002, 0x0002, 0x00c2, /*R11*/
32350  	0x0182, 0x0082, 0x000a, 0x0024, /*R15*/
32351  	0x0009, 0x0000, 0x00ff, 0x0000, /*R19*/
32352  	0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R23*/
32353 diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
32354 index 5da17a7..4b8ffc2 100644
32355 --- a/sound/soc/codecs/wm8900.c
32356 +++ b/sound/soc/codecs/wm8900.c
32357 @@ -188,7 +188,6 @@ static int wm8900_volatile_register(unsigned int reg)
32358  {
32359  	switch (reg) {
32360  	case WM8900_REG_ID:
32361 -	case WM8900_REG_POWER1:
32362  		return 1;
32363  	default:
32364  		return 0;
32365 @@ -1236,11 +1235,6 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
32366  		goto err;
32367  	}
32368  
32369 -	/* Read back from the chip */
32370 -	reg = snd_soc_read(codec, WM8900_REG_POWER1);
32371 -	reg = (reg >> 12) & 0xf;
32372 -	dev_info(&i2c->dev, "WM8900 revision %d\n", reg);
32373 -
32374  	wm8900_reset(codec);
32375  
32376  	/* Turn the chip on */
32377 diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
32378 index f7dcabf..f0897d8 100644
32379 --- a/sound/soc/codecs/wm8904.c
32380 +++ b/sound/soc/codecs/wm8904.c
32381 @@ -820,7 +820,8 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
32382  	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
32383  	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
32384  
32385 -	return wm8904->deemph;
32386 +	ucontrol->value.enumerated.item[0] = wm8904->deemph;
32387 +	return 0;
32388  }
32389  
32390  static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
32391 diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
32392 index 5f02559..004e837 100644
32393 --- a/sound/soc/codecs/wm8955.c
32394 +++ b/sound/soc/codecs/wm8955.c
32395 @@ -384,7 +384,8 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
32396  	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
32397  	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
32398  
32399 -	return wm8955->deemph;
32400 +	ucontrol->value.enumerated.item[0] = wm8955->deemph;
32401 +	return 0;
32402  }
32403  
32404  static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
32405 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
32406 index 3c6ee61..2331f51 100644
32407 --- a/sound/soc/codecs/wm8960.c
32408 +++ b/sound/soc/codecs/wm8960.c
32409 @@ -137,7 +137,8 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
32410  	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
32411  	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
32412  
32413 -	return wm8960->deemph;
32414 +	ucontrol->value.enumerated.item[0] = wm8960->deemph;
32415 +	return 0;
32416  }
32417  
32418  static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
32419 diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
32420 index 2549d3a..3bd65c8 100644
32421 --- a/sound/soc/codecs/wm8961.c
32422 +++ b/sound/soc/codecs/wm8961.c
32423 @@ -711,7 +711,7 @@ static int wm8961_hw_params(struct snd_pcm_substream *substream,
32424  	if (fs <= 24000)
32425  		reg |= WM8961_DACSLOPE;
32426  	else
32427 -		reg &= WM8961_DACSLOPE;
32428 +		reg &= ~WM8961_DACSLOPE;
32429  	snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
32430  
32431  	return 0;
32432 @@ -736,7 +736,7 @@ static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
32433  		freq /= 2;
32434  	} else {
32435  		dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
32436 -		reg &= WM8961_MCLKDIV;
32437 +		reg &= ~WM8961_MCLKDIV;
32438  	}
32439  
32440  	snd_soc_write(codec, WM8961_CLOCKING1, reg);
32441 diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
32442 index dd8d909..0022c7a 100644
32443 --- a/sound/soc/codecs/wm8990.c
32444 +++ b/sound/soc/codecs/wm8990.c
32445 @@ -1183,7 +1183,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
32446  				     WM8990_VMIDTOG);
32447  
32448  			/* Delay to allow output caps to discharge */
32449 -			msleep(msecs_to_jiffies(300));
32450 +			msleep(300);
32451  
32452  			/* Disable VMIDTOG */
32453  			snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
32454 @@ -1195,17 +1195,17 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
32455  			/* Enable outputs */
32456  			snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
32457  
32458 -			msleep(msecs_to_jiffies(50));
32459 +			msleep(50);
32460  
32461  			/* Enable VMID at 2x50k */
32462  			snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
32463  
32464 -			msleep(msecs_to_jiffies(100));
32465 +			msleep(100);
32466  
32467  			/* Enable VREF */
32468  			snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
32469  
32470 -			msleep(msecs_to_jiffies(600));
32471 +			msleep(600);
32472  
32473  			/* Enable BUFIOEN */
32474  			snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
32475 @@ -1250,7 +1250,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
32476  		/* Disable VMID */
32477  		snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
32478  
32479 -		msleep(msecs_to_jiffies(300));
32480 +		msleep(300);
32481  
32482  		/* Enable all output discharge bits */
32483  		snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
32484 diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
32485 index 522249d..f3732c2 100644
32486 --- a/sound/soc/codecs/wm8994.c
32487 +++ b/sound/soc/codecs/wm8994.c
32488 @@ -2519,18 +2519,18 @@ SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
32489  SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
32490  SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
32491  
32492 -SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture",
32493 +SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
32494  		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
32495 -SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
32496 +SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
32497  		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
32498  SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0,
32499  		    WM8994_POWER_MANAGEMENT_5, 9, 0),
32500  SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0,
32501  		    WM8994_POWER_MANAGEMENT_5, 8, 0),
32502  
32503 -SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
32504 +SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
32505  		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
32506 -SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
32507 +SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
32508  		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
32509  SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0,
32510  		    WM8994_POWER_MANAGEMENT_5, 11, 0),
32511 @@ -2571,6 +2571,7 @@ SND_SOC_DAPM_AIF_IN("AIF2DACR", NULL, 0,
32512  
32513  SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
32514  SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
32515 +SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
32516  SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
32517  
32518  SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
32519 @@ -2766,6 +2767,11 @@ static const struct snd_soc_dapm_route intercon[] = {
32520  	{ "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" },
32521  	{ "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" },
32522  
32523 +	{ "AIF1ADCDAT", NULL, "AIF1ADC1L" },
32524 +	{ "AIF1ADCDAT", NULL, "AIF1ADC1R" },
32525 +	{ "AIF1ADCDAT", NULL, "AIF1ADC2L" },
32526 +	{ "AIF1ADCDAT", NULL, "AIF1ADC2R" },
32527 +
32528  	{ "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
32529  
32530  	/* AIF3 output */
32531 @@ -2914,6 +2920,7 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
32532  		/* Allow no source specification when stopping */
32533  		if (freq_out)
32534  			return -EINVAL;
32535 +		src = wm8994->fll[id].src;
32536  		break;
32537  	case WM8994_FLL_SRC_MCLK1:
32538  	case WM8994_FLL_SRC_MCLK2:
32539 @@ -3485,7 +3492,7 @@ static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
32540  	else
32541  		val = 0;
32542  
32543 -	return snd_soc_update_bits(codec, reg, mask, reg);
32544 +	return snd_soc_update_bits(codec, reg, mask, val);
32545  }
32546  
32547  #define WM8994_RATES SNDRV_PCM_RATE_8000_96000
32548 diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
32549 index 2cb8153..359ad88 100644
32550 --- a/sound/soc/codecs/wm_hubs.c
32551 +++ b/sound/soc/codecs/wm_hubs.c
32552 @@ -92,6 +92,7 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op)
32553  static void calibrate_dc_servo(struct snd_soc_codec *codec)
32554  {
32555  	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
32556 +	s8 offset;
32557  	u16 reg, reg_l, reg_r, dcs_cfg;
32558  
32559  	/* Set for 32 series updates */
32560 @@ -130,16 +131,14 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
32561  		dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
32562  
32563  		/* HPOUT1L */
32564 -		if (reg_l + hubs->dcs_codes > 0 &&
32565 -		    reg_l + hubs->dcs_codes < 0xff)
32566 -			reg_l += hubs->dcs_codes;
32567 -		dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
32568 +		offset = reg_l;
32569 +		offset += hubs->dcs_codes;
32570 +		dcs_cfg = (u8)offset << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
32571  
32572  		/* HPOUT1R */
32573 -		if (reg_r + hubs->dcs_codes > 0 &&
32574 -		    reg_r + hubs->dcs_codes < 0xff)
32575 -			reg_r += hubs->dcs_codes;
32576 -		dcs_cfg |= reg_r;
32577 +		offset = reg_r;
32578 +		offset += hubs->dcs_codes;
32579 +		dcs_cfg |= (u8)offset;
32580  
32581  		dev_dbg(codec->dev, "DCS result: %x\n", dcs_cfg);
32582  
32583 @@ -293,7 +292,7 @@ SOC_DOUBLE_R("Speaker Switch",
32584  SOC_DOUBLE_R("Speaker ZC Switch",
32585  	     WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
32586  	     7, 1, 0),
32587 -SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 0, 3, 7, 0,
32588 +SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 3, 0, 7, 0,
32589  	       spkboost_tlv),
32590  SOC_ENUM("Speaker Reference", speaker_ref),
32591  SOC_ENUM("Speaker Mode", speaker_mode),
32592 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
32593 index b9c2bc6..0eb1ade 100644
32594 --- a/sound/usb/midi.c
32595 +++ b/sound/usb/midi.c
32596 @@ -843,8 +843,8 @@ static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep,
32597  		return;
32598  	}
32599  
32600 -	memset(urb->transfer_buffer + count, 0xFD, 9 - count);
32601 -	urb->transfer_buffer_length = count;
32602 +	memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count);
32603 +	urb->transfer_buffer_length = ep->max_transfer;
32604  }
32605  
32606  static struct usb_protocol_ops snd_usbmidi_122l_ops = {
32607 @@ -1288,6 +1288,13 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
32608  	case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */
32609  		ep->max_transfer = 4;
32610  		break;
32611 +		/*
32612 +		 * Some devices only work with 9 bytes packet size:
32613 +		 */
32614 +	case USB_ID(0x0644, 0x800E): /* Tascam US-122L */
32615 +	case USB_ID(0x0644, 0x800F): /* Tascam US-144 */
32616 +		ep->max_transfer = 9;
32617 +		break;
32618  	}
32619  	for (i = 0; i < OUTPUT_URBS; ++i) {
32620  		buffer = usb_alloc_coherent(umidi->dev,
32621 diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
32622 index 6ef68e4..084e6fc 100644
32623 --- a/sound/usb/usx2y/us122l.c
32624 +++ b/sound/usb/usx2y/us122l.c
32625 @@ -273,29 +273,26 @@ static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
32626  					  struct file *file, poll_table *wait)
32627  {
32628  	struct us122l	*us122l = hw->private_data;
32629 -	struct usb_stream *s = us122l->sk.s;
32630  	unsigned	*polled;
32631  	unsigned int	mask;
32632  
32633  	poll_wait(file, &us122l->sk.sleep, wait);
32634  
32635 -	switch (s->state) {
32636 -	case usb_stream_ready:
32637 -		if (us122l->first == file)
32638 -			polled = &s->periods_polled;
32639 -		else
32640 -			polled = &us122l->second_periods_polled;
32641 -		if (*polled != s->periods_done) {
32642 -			*polled = s->periods_done;
32643 -			mask = POLLIN | POLLOUT | POLLWRNORM;
32644 -			break;
32645 +	mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
32646 +	if (mutex_trylock(&us122l->mutex)) {
32647 +		struct usb_stream *s = us122l->sk.s;
32648 +		if (s && s->state == usb_stream_ready) {
32649 +			if (us122l->first == file)
32650 +				polled = &s->periods_polled;
32651 +			else
32652 +				polled = &us122l->second_periods_polled;
32653 +			if (*polled != s->periods_done) {
32654 +				*polled = s->periods_done;
32655 +				mask = POLLIN | POLLOUT | POLLWRNORM;
32656 +			} else
32657 +				mask = 0;
32658  		}
32659 -		/* Fall through */
32660 -		mask = 0;
32661 -		break;
32662 -	default:
32663 -		mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
32664 -		break;
32665 +		mutex_unlock(&us122l->mutex);
32666  	}
32667  	return mask;
32668  }
32669 @@ -381,6 +378,7 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
32670  {
32671  	struct usb_stream_config *cfg;
32672  	struct us122l *us122l = hw->private_data;
32673 +	struct usb_stream *s;
32674  	unsigned min_period_frames;
32675  	int err = 0;
32676  	bool high_speed;
32677 @@ -426,18 +424,18 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
32678  	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
32679  
32680  	mutex_lock(&us122l->mutex);
32681 +	s = us122l->sk.s;
32682  	if (!us122l->master)
32683  		us122l->master = file;
32684  	else if (us122l->master != file) {
32685 -		if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
32686 +		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
32687  			err = -EIO;
32688  			goto unlock;
32689  		}
32690  		us122l->slave = file;
32691  	}
32692 -	if (!us122l->sk.s ||
32693 -	    memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
32694 -	    us122l->sk.s->state == usb_stream_xrun) {
32695 +	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
32696 +	    s->state == usb_stream_xrun) {
32697  		us122l_stop(us122l);
32698  		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
32699  			err = -EIO;
32700 @@ -448,6 +446,7 @@ unlock:
32701  	mutex_unlock(&us122l->mutex);
32702  free:
32703  	kfree(cfg);
32704 +	wake_up_all(&us122l->sk.sleep);
32705  	return err;
32706  }
32707  

Attached Files

To refer to attachments on a page, use attachment:filename, as shown below in the list of files. Do NOT use the URL of the [get] link, since this is subject to change and can break easily.
  • [get | view] (2015-06-04 21:22:59, 244.8 KB) [[attachment:MC2-liblitmus-imx6-rtss15.patch]]
  • [get | view] (2016-05-12 14:35:37, 51.9 KB) [[attachment:MC2-liblitmus-rtss16.patch]]
  • [get | view] (2016-05-12 14:36:06, 190.4 KB) [[attachment:MC2-litmus-rt-rtss16.patch]]
  • [get | view] (2015-07-19 10:27:52, 1119.9 KB) [[attachment:MC2-litmut-rt-imx6-rtss15.patch]]
  • [get | view] (2014-05-27 20:46:19, 58.3 KB) [[attachment:MC2_liblitmus_ipdps15.patch]]
  • [get | view] (2014-05-27 20:45:43, 1044.3 KB) [[attachment:MC2_litmusrt_ipdps15.patch]]
  • [get | view] (2017-04-07 21:48:09, 6099.5 KB) [[attachment:buff_sharing.tar]]
  • [get | view] (2015-01-08 14:20:07, 61.0 KB) [[attachment:feather-trace-patch-against-sched-deadline-v8.patch]]
  • [get | view] (2014-04-01 23:10:10, 38.9 KB) [[attachment:gedf-mp-rtas14.patch]]
  • [get | view] (2012-03-02 20:13:59, 1.9 KB) [[attachment:gpu-klmirqd-liblitmus-rt-ecrts12.patch]]
  • [get | view] (2012-03-02 20:14:25, 389.8 KB) [[attachment:gpu-klmirqd-litmus-rt-ecrts12.patch]]
  • [get | view] (2012-05-26 21:41:34, 418.0 KB) [[attachment:gpusync-rtss12.patch]]
  • [get | view] (2012-05-26 21:42:20, 8.6 KB) [[attachment:gpusync_liblitmus-rtss12.patch]]
  • [get | view] (2013-05-21 15:32:08, 208.6 KB) [[attachment:gpusync_rtss13_liblitmus.patch]]
  • [get | view] (2013-05-21 15:31:32, 779.5 KB) [[attachment:gpusync_rtss13_litmus.patch]]
  • [get | view] (2012-05-26 21:42:41, 71.4 KB) [[attachment:klt_tracker_v1.0.litmus.tgz]]
  • [get | view] (2016-10-13 21:14:05, 19.6 KB) [[attachment:liblitmus-rtas17.patch]]
  • [get | view] (2017-05-01 20:46:22, 90.0 KB) [[attachment:liblitmus-rtns17.patch]]
  • [get | view] (2018-12-11 01:38:53, 49.1 KB) [[attachment:liblitmus-semi-part-with-edfos.patch]]
  • [get | view] (2017-10-09 19:16:09, 304.0 KB) [[attachment:litmus-rt-os-isolation.patch]]
  • [get | view] (2016-10-13 21:13:27, 207.6 KB) [[attachment:litmus-rt-rtas17.patch]]
  • [get | view] (2017-05-01 20:46:40, 207.6 KB) [[attachment:litmus-rt-rtns17.patch]]
  • [get | view] (2018-12-11 01:39:04, 100.5 KB) [[attachment:litmus-rt-semi-part-with-edfos.patch]]
  • [get | view] (2018-06-26 04:31:48, 7.0 KB) [[attachment:mc2_liblitmus_2015.1-rtns18.patch]]
  • [get | view] (2018-06-26 04:31:33, 292.7 KB) [[attachment:mc2_litmus-rt_2015.1-rtns18.patch]]
  • [get | view] (2017-05-01 20:45:10, 2596.9 KB) [[attachment:mcp_study.zip]]
  • [get | view] (2013-07-13 14:11:53, 58.0 KB) [[attachment:omip-ecrts13.patch]]
  • [get | view] (2014-02-19 21:48:33, 17.2 KB) [[attachment:pgmrt-liblitmus-ecrts14.patch]]
  • [get | view] (2014-02-19 21:47:57, 87.8 KB) [[attachment:pgmrt-litmusrt-ecrts14.patch]]
  • [get | view] (2015-01-08 14:22:32, 61.0 KB) [[attachment:sched-deadline-v8-feather-trace-rtas14.patch]]
  • [get | view] (2018-06-26 04:32:13, 2545.1 KB) [[attachment:sched_study_rtns2018.tar.gz]]
  • [get | view] (2017-04-07 21:53:39, 5969.5 KB) [[attachment:seminal.tar]]
  • [get | view] (2017-04-07 21:51:13, 6064.0 KB) [[attachment:shared_libraries.tar]]
  • [get | view] (2013-07-13 13:58:25, 42.7 KB) [[attachment:tracing-and-dflp-rtas13.patch]]
 All files | Selected Files: delete move to page copy to page

You are not allowed to attach a file to this page.