Attachment 'litmus-rt-os-isolation.patch'
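
The hunks below thread a new GFP_COLOR allocation flag through the kernel's allocators and several drivers so that buffers can be drawn from LLC-color- and DRAM-bank-partitioned pools. As a rough usage sketch (not part of the patch; the function name and arguments are placeholders), a driver opts in by OR-ing GFP_COLOR into its usual allocation mask, exactly as the UVC, videobuf2, and FEC hunks further down do:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical driver fragment: request a coherent DMA buffer from the
     * colored/bank-partitioned pool that this patch introduces. */
    static int example_colored_alloc(struct device *dev, size_t size,
                                     void **cpu_addr, dma_addr_t *dma_handle)
    {
            /* GFP_COLOR is the new flag defined in include/linux/gfp.h below */
            *cpu_addr = dma_alloc_coherent(dev, size, dma_handle,
                                           GFP_KERNEL | GFP_COLOR);
            return *cpu_addr ? 0 : -ENOMEM;
    }
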


   1 diff --git arch/arm/boot/compressed/Makefile arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- arch/arm/boot/compressed/Makefile
   4 +++ arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git arch/arm/include/asm/dma-mapping.h arch/arm/include/asm/dma-mapping.h
  14 index b52101d..fef26e0 100644
  15 --- arch/arm/include/asm/dma-mapping.h
  16 +++ arch/arm/include/asm/dma-mapping.h
  17 @@ -219,6 +219,13 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
  18  	void *cpu_addr;
  19  	BUG_ON(!ops);
  20  
  21 +#ifdef CONFIG_SCHED_DEBUG_TRACE
  22 +	if (flag&GFP_COLOR) {
  23 +		printk(KERN_INFO "dma_alloc_attrs() \n");
  24 +		printk(KERN_INFO "func: %pF at address: %p\n", ops->alloc, ops->alloc);
  25 +	}
  26 +#endif
  27 +	
  28  	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
  29  	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
  30  	return cpu_addr;
  31 diff --git arch/arm/include/asm/unistd.h arch/arm/include/asm/unistd.h
  32 index 0c462a9..5291b70 100644
  33 --- arch/arm/include/asm/unistd.h
  34 +++ arch/arm/include/asm/unistd.h
  35 @@ -19,7 +19,8 @@
  36   * This may need to be greater than __NR_last_syscall+1 in order to
  37   * account for the padding in the syscall table
  38   */
  39 -#define __NR_syscalls  (388 + NR_litmus_syscalls)
  40 +#define __NR_syscalls  (388 + NR_litmus_syscalls + 0)
  41 +
  42  
  43  /*
  44   * *NOTE*: This is a ghost syscall private to the kernel.  Only the
  45 diff --git arch/arm/kernel/calls.S arch/arm/kernel/calls.S
  46 index f4738a8..55dc863 100644
  47 --- arch/arm/kernel/calls.S
  48 +++ arch/arm/kernel/calls.S
  49 @@ -409,6 +409,14 @@
  50          CALL(sys_wait_for_ts_release)
  51  		CALL(sys_release_ts)
  52  		CALL(sys_null_call)
  53 +/* 400 */	CALL(sys_get_current_budget)
  54 +		CALL(sys_reservation_create)
  55 +		CALL(sys_reservation_destroy)
  56 +		CALL(sys_set_mc2_task_param)
  57 +		CALL(sys_set_page_color)
  58 +/* 405 */	CALL(sys_test_call)
  59 +		CALL(sys_run_test)
  60 +		CALL(sys_lock_buffer)
  61  
  62  #ifndef syscalls_counted
  63  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
  64 diff --git arch/arm/kernel/irq.c arch/arm/kernel/irq.c
  65 index 350f188..a9ba6e5 100644
  66 --- arch/arm/kernel/irq.c
  67 +++ arch/arm/kernel/irq.c
  68 @@ -44,6 +44,9 @@
  69  #include <asm/mach/irq.h>
  70  #include <asm/mach/time.h>
  71  
  72 +#include <litmus/cache_proc.h>
  73 +#include <litmus/litmus.h>
  74 +
  75  unsigned long irq_err_count;
  76  
  77  int arch_show_interrupts(struct seq_file *p, int prec)
  78 @@ -66,7 +69,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  79   */
  80  void handle_IRQ(unsigned int irq, struct pt_regs *regs)
  81  {
  82 +	enter_irq_mode();
  83  	__handle_domain_irq(NULL, irq, false, regs);
  84 +	exit_irq_mode();
  85  }
  86  
  87  /*
  88 diff --git arch/arm/mach-imx/mmdc.c arch/arm/mach-imx/mmdc.c
  89 index 0411f06..03dce5b 100644
  90 --- arch/arm/mach-imx/mmdc.c
  91 +++ arch/arm/mach-imx/mmdc.c
  92 @@ -40,6 +40,7 @@ static int imx_mmdc_probe(struct platform_device *pdev)
  93  	reg = mmdc_base + MMDC_MDMISC;
  94  	/* Get ddr type */
  95  	val = readl_relaxed(reg);
  96 +	pr_info("MMDC_MDMISC reg: 0x%08x\n", val);
  97  	ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
  98  		 BP_MMDC_MDMISC_DDR_TYPE;
  99  
 100 diff --git arch/arm/mm/cache-l2x0.c arch/arm/mm/cache-l2x0.c
 101 index e309c8f..969da4a 100644
 102 --- arch/arm/mm/cache-l2x0.c
 103 +++ arch/arm/mm/cache-l2x0.c
 104 @@ -33,6 +33,8 @@
 105  #include "cache-tauros3.h"
 106  #include "cache-aurora-l2.h"
 107  
 108 +#include <litmus/cache_proc.h>
 109 +
 110  struct l2c_init_data {
 111  	const char *type;
 112  	unsigned way_size_0;
 113 @@ -651,6 +653,11 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 114  	 */
 115  	aux |= L310_AUX_CTRL_NS_LOCKDOWN;
 116  
 117 +	/*
 118 +	 * Always enable non-secure interrupt access control registers
 119 +	 */
 120 +	aux |= L220_AUX_CTRL_NS_INT_CTRL;
 121 +
 122  	l2c_enable(base, aux, num_lock);
 123  
 124  	/* Read back resulting AUX_CTRL value as it could have been altered. */
 125 @@ -726,7 +733,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
 126  
 127  	if (n) {
 128  		unsigned i;
 129 -
 130  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
 131  		for (i = 0; i < n; i++)
 132  			pr_cont(" %s", errata[i]);
 133 @@ -774,6 +780,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
 134  	},
 135  };
 136  
 137 +void l2c310_flush_all(void)
 138 +{
 139 +	l2c210_flush_all();
  140 +}
 141 +
 142  static int __init __l2c_init(const struct l2c_init_data *data,
 143  			     u32 aux_val, u32 aux_mask, u32 cache_id)
 144  {
 145 @@ -876,6 +887,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 146  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
 147  		data->type, cache_id, aux);
 148  
 149 +	litmus_setup_lockdown(l2x0_base, cache_id);
 150 +	
 151  	return 0;
 152  }
 153  
 154 diff --git arch/arm/mm/dma-mapping.c arch/arm/mm/dma-mapping.c
 155 index 7e7583d..8a297ad 100644
 156 --- arch/arm/mm/dma-mapping.c
 157 +++ arch/arm/mm/dma-mapping.c
 158 @@ -259,7 +259,8 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 159  	page = alloc_pages(gfp, order);
 160  	if (!page)
 161  		return NULL;
 162 -
 163 +	if (gfp&GFP_COLOR)
  164 +		printk(KERN_INFO "__dma_alloc_buffer(): size %zu, order %ld requested\n", size, order);
 165  	/*
 166  	 * Now split the huge page and free the excess pages
 167  	 */
 168 @@ -341,6 +342,24 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 169  		atomic_pool_size = size;
 170  }
 171  
 172 +#define BANK_MASK  0x38000000     
 173 +#define BANK_SHIFT  27
 174 +
 175 +#define CACHE_MASK  0x0000f000      
 176 +#define CACHE_SHIFT 12
 177 +
 178 +/* Decoding page color, 0~15 */ 
 179 +static inline unsigned int page_color(struct page *page)
 180 +{
 181 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
 182 +}
 183 +
 184 +/* Decoding page bank number, 0~7 */ 
 185 +static inline unsigned int page_bank(struct page *page)
 186 +{
 187 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
 188 +}
 189 +
 190  /*
 191   * Initialise the coherent pool for atomic allocations.
 192   */
 193 @@ -375,6 +394,7 @@ static int __init atomic_pool_init(void)
 194  				(void *)PAGE_SHIFT);
 195  		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
 196  		       atomic_pool_size / 1024);
 197 +		pr_info("DMA: coherent pool located in 0x%p phys %08x color %d bank %d\n", ptr, page_to_phys(page), page_color(page), page_bank(page));
 198  		return 0;
 199  	}
 200  
 201 @@ -644,15 +664,17 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 202  	*handle = DMA_ERROR_CODE;
 203  	size = PAGE_ALIGN(size);
 204  	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 205 -
 206 +	
 207  	if (is_coherent || nommu())
 208  		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 209  	else if (!(gfp & __GFP_WAIT))
 210  		addr = __alloc_from_pool(size, &page);
 211 -	else if (!dev_get_cma_area(dev))
 212 +	else if (!dev_get_cma_area(dev)) {
 213  		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 214 -	else
 215 +		//printk(KERN_INFO "__alloc_remap_buffer returned %p page, size %d, color %d, bank %d, pfn %05lx\n", page, size, page_color(page), page_bank(page), page_to_pfn(page));
 216 +	} else {
 217  		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
 218 +	}
 219  
 220  	if (page)
 221  		*handle = pfn_to_dma(dev, page_to_pfn(page));
 222 @@ -670,6 +692,17 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 223  	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 224  	void *memory;
 225  
 226 +	/*
 227 +	if ((gfp&GFP_COLOR) && (size > PAGE_SIZE*4)) {
 228 +#ifdef CONFIG_SCHED_DEBUG_TRACE
 229 +		printk(KERN_INFO "arm_dma_alloc(): original prot %08x\n", prot);
 230 +#endif
 231 +		//prot = pgprot_noncached(prot);
 232 +#ifdef CONFIG_SCHED_DEBUG_TRACE
 233 +		printk(KERN_INFO "arm_dma_alloc(): set as uncacheable prot %08x\n", prot);
 234 +#endif
 235 +	}
 236 +	*/
 237  	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 238  		return memory;
 239  
 240 diff --git arch/x86/syscalls/syscall_32.tbl arch/x86/syscalls/syscall_32.tbl
 241 index 34680a5..b303a9b 100644
 242 --- arch/x86/syscalls/syscall_32.tbl
 243 +++ arch/x86/syscalls/syscall_32.tbl
 244 @@ -377,3 +377,11 @@
 245  368	i386	wait_for_ts_release	sys_wait_for_ts_release
 246  369	i386	release_ts		sys_release_ts
 247  370	i386	null_call		sys_null_call
 248 +371	i386	get_current_budget	sys_get_current_budget
 249 +372	i386	reservation_create	sys_reservation_create
 250 +373	i386	reservation_destroy	sys_reservation_destroy
 251 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
 252 +375	i386	set_page_color		sys_set_page_color
 253 +376	i386	test_call		sys_test_call
 254 +377	i386	run_test		sys_run_test
 255 +378	i386	lock_buffer		sys_lock_buffer
 256 diff --git arch/x86/syscalls/syscall_64.tbl arch/x86/syscalls/syscall_64.tbl
 257 index cbd1b6b..5f24a80 100644
 258 --- arch/x86/syscalls/syscall_64.tbl
 259 +++ arch/x86/syscalls/syscall_64.tbl
 260 @@ -342,6 +342,14 @@
 261  360	common	wait_for_ts_release	sys_wait_for_ts_release
 262  361	common	release_ts		sys_release_ts
 263  362	common	null_call		sys_null_call
 264 +363	common	get_current_budget	sys_get_current_budget
 265 +364	common	reservation_create	sys_reservation_create
 266 +365	common	reservation_destroy	sys_reservation_destroy
 267 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 268 +367	common	set_page_color		sys_set_page_color
 269 +368	common	test_call		sys_test_call
 270 +369	common	run_test		sys_run_test
 271 +370	common	lock_buffer		sys_lock_buffer
 272  
 273  #
 274  # x32-specific system call numbers start at 512 to avoid cache impact
 275 diff --git drivers/media/usb/uvc/uvc_v4l2.c drivers/media/usb/uvc/uvc_v4l2.c
 276 index c4b1ac6..e40daf9 100644
 277 --- drivers/media/usb/uvc/uvc_v4l2.c
 278 +++ drivers/media/usb/uvc/uvc_v4l2.c
 279 @@ -1437,7 +1437,9 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
 280  	struct uvc_streaming *stream = handle->stream;
 281  
 282  	uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n");
 283 -
 284 +#if 0
 285 +	printk(KERN_INFO "uvc_mmap entry point\n");
 286 +#endif
 287  	return uvc_queue_mmap(&stream->queue, vma);
 288  }
 289  
 290 diff --git drivers/media/usb/uvc/uvc_video.c drivers/media/usb/uvc/uvc_video.c
 291 index 20ccc9d..d4a64fc 100644
 292 --- drivers/media/usb/uvc/uvc_video.c
 293 +++ drivers/media/usb/uvc/uvc_video.c
 294 @@ -26,6 +26,13 @@
 295  
 296  #include "uvcvideo.h"
 297  
 298 +#define USE_LEVEL_A_BANK	1
 299 +#ifdef USE_LEVEL_A_BANK
 300 +#define UVC_FLAG	(GFP_COLOR)
 301 +#else
 302 +#define UVC_FLAG	(0)
 303 +#endif	
 304 +
 305  /* ------------------------------------------------------------------------
 306   * UVC Controls
 307   */
 308 @@ -167,7 +174,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
 309  			query == UVC_GET_DEF)
 310  		return -EIO;
 311  
 312 -	data = kmalloc(size, GFP_KERNEL);
 313 +	data = kmalloc(size, GFP_KERNEL|UVC_FLAG);
 314  	if (data == NULL)
 315  		return -ENOMEM;
 316  
 317 @@ -251,7 +258,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
 318  	int ret;
 319  
 320  	size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
 321 -	data = kzalloc(size, GFP_KERNEL);
 322 +	data = kzalloc(size, GFP_KERNEL|UVC_FLAG);
 323  	if (data == NULL)
 324  		return -ENOMEM;
 325  
 326 @@ -494,7 +501,7 @@ static int uvc_video_clock_init(struct uvc_streaming *stream)
 327  	clock->size = 32;
 328  
 329  	clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
 330 -				 GFP_KERNEL);
 331 +				 GFP_KERNEL|UVC_FLAG);
 332  	if (clock->samples == NULL)
 333  		return -ENOMEM;
 334  
 335 @@ -1343,7 +1350,7 @@ static void uvc_video_complete(struct urb *urb)
 336  
 337  	stream->decode(urb, stream, buf);
 338  
 339 -	if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
 340 +	if ((ret = usb_submit_urb(urb, GFP_ATOMIC|UVC_FLAG)) < 0) {
 341  		uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
 342  			ret);
 343  	}
 344 @@ -1406,10 +1413,10 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
 345  #ifndef CONFIG_DMA_NONCOHERENT
 346  			stream->urb_buffer[i] = usb_alloc_coherent(
 347  				stream->dev->udev, stream->urb_size,
 348 -				gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
 349 +				gfp_flags | __GFP_NOWARN | UVC_FLAG, &stream->urb_dma[i]);
 350  #else
 351  			stream->urb_buffer[i] =
 352 -			    kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
  353 +			    kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN | UVC_FLAG);
 354  #endif
 355  			if (!stream->urb_buffer[i]) {
 356  				uvc_free_urb_buffers(stream);
 357 @@ -1492,14 +1499,14 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
 358  	psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
 359  	size = stream->ctrl.dwMaxVideoFrameSize;
 360  
 361 -	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
 362 +	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags|UVC_FLAG);
 363  	if (npackets == 0)
 364  		return -ENOMEM;
 365  
 366  	size = npackets * psize;
 367  
 368  	for (i = 0; i < UVC_URBS; ++i) {
 369 -		urb = usb_alloc_urb(npackets, gfp_flags);
 370 +		urb = usb_alloc_urb(npackets, gfp_flags|UVC_FLAG);
 371  		if (urb == NULL) {
 372  			uvc_uninit_video(stream, 1);
 373  			return -ENOMEM;
 374 @@ -1548,7 +1555,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
 375  	size = stream->ctrl.dwMaxPayloadTransferSize;
 376  	stream->bulk.max_payload_size = size;
 377  
 378 -	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
 379 +	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags|UVC_FLAG);
 380  	if (npackets == 0)
 381  		return -ENOMEM;
 382  
 383 @@ -1565,7 +1572,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
 384  		size = 0;
 385  
 386  	for (i = 0; i < UVC_URBS; ++i) {
 387 -		urb = usb_alloc_urb(0, gfp_flags);
 388 +		urb = usb_alloc_urb(0, gfp_flags|UVC_FLAG);
 389  		if (urb == NULL) {
 390  			uvc_uninit_video(stream, 1);
 391  			return -ENOMEM;
 392 @@ -1654,7 +1661,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 393  		if (ret < 0)
 394  			return ret;
 395  
 396 -		ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
 397 +		ret = uvc_init_video_isoc(stream, best_ep, gfp_flags|UVC_FLAG);
 398  	} else {
 399  		/* Bulk endpoint, proceed to URB initialization. */
 400  		ep = uvc_find_endpoint(&intf->altsetting[0],
 401 @@ -1662,7 +1669,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 402  		if (ep == NULL)
 403  			return -EIO;
 404  
 405 -		ret = uvc_init_video_bulk(stream, ep, gfp_flags);
 406 +		ret = uvc_init_video_bulk(stream, ep, gfp_flags|UVC_FLAG);
 407  	}
 408  
 409  	if (ret < 0)
 410 @@ -1670,7 +1677,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 411  
 412  	/* Submit the URBs. */
 413  	for (i = 0; i < UVC_URBS; ++i) {
 414 -		ret = usb_submit_urb(stream->urb[i], gfp_flags);
 415 +		ret = usb_submit_urb(stream->urb[i], gfp_flags|UVC_FLAG);
 416  		if (ret < 0) {
 417  			uvc_printk(KERN_ERR, "Failed to submit URB %u "
 418  					"(%d).\n", i, ret);
 419 @@ -1741,7 +1748,7 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
 420  	if (ret < 0)
 421  		return ret;
 422  
 423 -	return uvc_init_video(stream, GFP_NOIO);
 424 +	return uvc_init_video(stream, GFP_NOIO|UVC_FLAG);
 425  }
 426  
 427  /* ------------------------------------------------------------------------
 428 @@ -1892,7 +1899,7 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
 429  	if (ret < 0)
 430  		goto error_commit;
 431  
 432 -	ret = uvc_init_video(stream, GFP_KERNEL);
 433 +	ret = uvc_init_video(stream, GFP_KERNEL|UVC_FLAG);
 434  	if (ret < 0)
 435  		goto error_video;
 436  
 437 diff --git drivers/media/v4l2-core/videobuf2-core.c drivers/media/v4l2-core/videobuf2-core.c
 438 index 66ada01..ef75f1f 100644
 439 --- drivers/media/v4l2-core/videobuf2-core.c
 440 +++ drivers/media/v4l2-core/videobuf2-core.c
 441 @@ -30,6 +30,13 @@
 442  #include <media/v4l2-common.h>
 443  #include <media/videobuf2-core.h>
 444  
 445 +#define USE_LEVEL_A_BANK	1
 446 +#ifdef USE_LEVEL_A_BANK
 447 +#define VB2_CORE_FLAG	(GFP_COLOR)
 448 +#else
 449 +#define VB2_CORE_FLAG	(0)
 450 +#endif
 451 +
 452  static int debug;
 453  module_param(debug, int, 0644);
 454  
 455 @@ -200,7 +207,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 456  	 */
 457  	for (plane = 0; plane < vb->num_planes; ++plane) {
 458  		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
 459 -
 460 +		printk(KERN_INFO "__vb2_buf_mem_alloc(): size %ld, func %pF GFP_COLOR? %d\n", size, vb->vb2_queue->mem_ops->alloc, q->gfp_flags&GFP_COLOR);
 461  		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
 462  				      size, dma_dir, q->gfp_flags);
 463  		if (IS_ERR_OR_NULL(mem_priv))
 464 @@ -352,7 +359,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
 465  
 466  	for (buffer = 0; buffer < num_buffers; ++buffer) {
 467  		/* Allocate videobuf buffer structures */
 468 -		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
 469 +		vb = kzalloc(q->buf_struct_size, GFP_KERNEL|VB2_CORE_FLAG);
 470  		if (!vb) {
 471  			dprintk(1, "memory alloc for buffer struct failed\n");
 472  			break;
 473 @@ -402,7 +409,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
 474  
 475  	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
 476  			buffer, num_planes);
 477 -
 478 +	printk(KERN_INFO "allocated %d buffers, %d plane(s) each\n",
 479 +			buffer, num_planes);
 480  	return buffer;
 481  }
 482  
 483 @@ -2237,6 +2245,7 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
 484  	 * Tell driver to start streaming provided sufficient buffers
 485  	 * are available.
 486  	 */
  487 +	printk(KERN_INFO "vb2_internal_streamon()\n");
 488  	if (q->queued_count >= q->min_buffers_needed) {
 489  		ret = vb2_start_streaming(q);
 490  		if (ret) {
 491 @@ -2525,7 +2534,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 492  			"MMAP invalid, as it would overflow buffer length\n");
 493  		return -EINVAL;
 494  	}
 495 -
  496 +	printk(KERN_INFO "memop mmap %pF\n", vb->vb2_queue->mem_ops->mmap);
 497  	mutex_lock(&q->mmap_lock);
 498  	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
 499  	mutex_unlock(&q->mmap_lock);
 500 @@ -2830,7 +2839,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
 501  		(read) ? "read" : "write", count, q->fileio_read_once,
 502  		q->fileio_write_immediately);
 503  
 504 -	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
 505 +	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL|VB2_CORE_FLAG);
 506  	if (fileio == NULL)
 507  		return -ENOMEM;
 508  
 509 @@ -3223,7 +3232,7 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
 510  	if (WARN_ON(q->fileio))
 511  		return -EBUSY;
 512  
 513 -	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
 514 +	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL|VB2_CORE_FLAG);
 515  	if (threadio == NULL)
 516  		return -ENOMEM;
 517  	threadio->fnc = fnc;
 518 diff --git drivers/media/v4l2-core/videobuf2-vmalloc.c drivers/media/v4l2-core/videobuf2-vmalloc.c
 519 index 657ab30..b9a6457 100644
 520 --- drivers/media/v4l2-core/videobuf2-vmalloc.c
 521 +++ drivers/media/v4l2-core/videobuf2-vmalloc.c
 522 @@ -21,6 +21,14 @@
 523  #include <media/videobuf2-vmalloc.h>
 524  #include <media/videobuf2-memops.h>
 525  
 526 +
 527 +#define USE_LEVEL_A_BANK	1
 528 +#ifdef USE_LEVEL_A_BANK
 529 +#define VB2_FLAG	(GFP_COLOR)
 530 +#else
 531 +#define VB2_FLAG	(0)
 532 +#endif
 533 + 
 534  struct vb2_vmalloc_buf {
 535  	void				*vaddr;
 536  	struct page			**pages;
 537 @@ -39,13 +47,18 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
 538  			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
 539  {
 540  	struct vb2_vmalloc_buf *buf;
 541 -
 542 -	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
  542 +	/* video buffer allocation */
  543 +	printk(KERN_INFO "vb2_vmalloc_alloc(): size %ld requested\n", size);
 545 +	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags | VB2_FLAG);
 546  	if (!buf)
 547  		return NULL;
 548  
 549  	buf->size = size;
 550 -	buf->vaddr = vmalloc_user(buf->size);
 551 +#ifdef ENABLE_WORST_CASE
 552 +	buf->vaddr = vmalloc_color_user_cpu1(buf->size);
 553 +#else
 554 +	buf->vaddr = vmalloc_color_user(buf->size);
 555 +#endif
 556  	buf->dma_dir = dma_dir;
 557  	buf->handler.refcount = &buf->refcount;
 558  	buf->handler.put = vb2_vmalloc_put;
 559 @@ -81,7 +94,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 560  	struct vm_area_struct *vma;
 561  	dma_addr_t physp;
 562  
 563 -	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 564 +	buf = kzalloc(sizeof(*buf), GFP_KERNEL | VB2_FLAG);
 565  	if (!buf)
 566  		return NULL;
 567  
 568 @@ -103,7 +116,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 569  		last  = (vaddr + size - 1) >> PAGE_SHIFT;
 570  		buf->n_pages = last - first + 1;
 571  		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
 572 -				     GFP_KERNEL);
 573 +				     GFP_KERNEL | VB2_FLAG);
 574  		if (!buf->pages)
 575  			goto fail_pages_array_alloc;
 576  
 577 @@ -233,12 +246,12 @@ static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *de
 578  	int ret;
 579  	int i;
 580  
 581 -	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 582 +	attach = kzalloc(sizeof(*attach), GFP_KERNEL | VB2_FLAG);
 583  	if (!attach)
 584  		return -ENOMEM;
 585  
 586  	sgt = &attach->sgt;
 587 -	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
 588 +	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL | VB2_FLAG);
 589  	if (ret) {
 590  		kfree(attach);
 591  		return ret;
 592 @@ -429,7 +442,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 593  	if (dbuf->size < size)
 594  		return ERR_PTR(-EFAULT);
 595  
 596 -	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 597 +	buf = kzalloc(sizeof(*buf), GFP_KERNEL | VB2_FLAG);
 598  	if (!buf)
 599  		return ERR_PTR(-ENOMEM);
 600  
 601 diff --git drivers/net/ethernet/freescale/fec_main.c drivers/net/ethernet/freescale/fec_main.c
 602 index 66d47e4..192b008 100644
 603 --- drivers/net/ethernet/freescale/fec_main.c
 604 +++ drivers/net/ethernet/freescale/fec_main.c
 605 @@ -61,8 +61,17 @@
 606  
 607  #include <asm/cacheflush.h>
 608  
 609 +#include <litmus/cache_proc.h>
 610 +
 611  #include "fec.h"
 612  
 613 +#define USE_LEVEL_A_BANK	1
 614 +#ifdef USE_LEVEL_A_BANK
 615 +#define FEC_FLAG	(GFP_COLOR)
 616 +#else
 617 +#define FEC_FLAG	(0)
 618 +#endif	
 619 +
 620  static void set_multicast_list(struct net_device *ndev);
 621  static void fec_enet_itr_coal_init(struct net_device *ndev);
 622  
 623 @@ -1587,6 +1596,8 @@ fec_enet_interrupt(int irq, void *dev_id)
 624  	writel(int_events, fep->hwp + FEC_IEVENT);
 625  	fec_enet_collect_events(fep, int_events);
 626  
 627 +	enter_irq_mode();
 628 +	
 629  	if ((fep->work_tx || fep->work_rx) && fep->link) {
 630  		ret = IRQ_HANDLED;
 631  
 632 @@ -1605,6 +1616,7 @@ fec_enet_interrupt(int irq, void *dev_id)
 633  	if (fep->ptp_clock)
 634  		fec_ptp_check_pps_event(fep);
 635  
 636 +	exit_irq_mode();
 637  	return ret;
 638  }
 639  
 640 @@ -2623,7 +2635,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 641  	struct fec_enet_priv_tx_q *txq;
 642  
 643  	for (i = 0; i < fep->num_tx_queues; i++) {
 644 -		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
 645 +		txq = kzalloc(sizeof(*txq), GFP_KERNEL|FEC_FLAG);
 646  		if (!txq) {
 647  			ret = -ENOMEM;
 648  			goto alloc_failed;
 649 @@ -2640,7 +2652,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 650  		txq->tso_hdrs = dma_alloc_coherent(NULL,
 651  					txq->tx_ring_size * TSO_HEADER_SIZE,
 652  					&txq->tso_hdrs_dma,
 653 -					GFP_KERNEL);
 654 +					GFP_KERNEL|FEC_FLAG);
 655  		if (!txq->tso_hdrs) {
 656  			ret = -ENOMEM;
 657  			goto alloc_failed;
 658 @@ -2649,7 +2661,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 659  
 660  	for (i = 0; i < fep->num_rx_queues; i++) {
 661  		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
 662 -					   GFP_KERNEL);
 663 +					   GFP_KERNEL|FEC_FLAG);
 664  		if (!fep->rx_queue[i]) {
 665  			ret = -ENOMEM;
 666  			goto alloc_failed;
 667 @@ -2718,7 +2730,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
 668  	txq = fep->tx_queue[queue];
 669  	bdp = txq->tx_bd_base;
 670  	for (i = 0; i < txq->tx_ring_size; i++) {
 671 -		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 672 +		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL|FEC_FLAG);
 673  		if (!txq->tx_bounce[i])
 674  			goto err_alloc;
 675  
 676 @@ -3032,7 +3044,7 @@ static int fec_enet_init(struct net_device *ndev)
 677  
 678  	/* Allocate memory for buffer descriptors. */
 679  	cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
 680 -				      GFP_KERNEL);
 681 +				      GFP_KERNEL|FEC_FLAG);
 682  	if (!cbd_base) {
 683  		return -ENOMEM;
 684  	}
 685 diff --git drivers/usb/core/buffer.c drivers/usb/core/buffer.c
 686 index 506b969..b9af514 100644
 687 --- drivers/usb/core/buffer.c
 688 +++ drivers/usb/core/buffer.c
 689 @@ -128,6 +128,7 @@ void *hcd_buffer_alloc(
 690  		if (size <= pool_max[i])
 691  			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
 692  	}
 693 +
 694  	return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
 695  }
 696  
 697 diff --git include/linux/gfp.h include/linux/gfp.h
 698 index 15928f0..92643b8 100644
 699 --- include/linux/gfp.h
 700 +++ include/linux/gfp.h
 701 @@ -35,6 +35,7 @@ struct vm_area_struct;
 702  #define ___GFP_NO_KSWAPD	0x400000u
 703  #define ___GFP_OTHER_NODE	0x800000u
 704  #define ___GFP_WRITE		0x1000000u
 705 +#define ___GFP_COLOR		0x2000000u
 706  /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 707  
 708  /*
 709 @@ -94,6 +95,7 @@ struct vm_area_struct;
 710  #define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 711  #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 712  #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
 713 +#define __GFP_COLOR ((__force gfp_t)___GFP_COLOR)	/* Colored page request */
 714  
 715  /*
 716   * This may seem redundant, but it's a way of annotating false positives vs.
 717 @@ -101,7 +103,7 @@ struct vm_area_struct;
 718   */
 719  #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 720  
 721 -#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
 722 +#define __GFP_BITS_SHIFT 26	/* Room for N __GFP_FOO bits */
 723  #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 724  
 725  /* This equals 0, but use constants in case they ever change */
 726 @@ -127,7 +129,7 @@ struct vm_area_struct;
 727  /* Control page allocator reclaim behavior */
 728  #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 729  			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
 730 -			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 731 +			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|__GFP_COLOR)
 732  
 733  /* Control slab gfp mask during early boot */
 734  #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
 735 @@ -146,6 +148,9 @@ struct vm_area_struct;
 736  /* 4GB DMA on some platforms */
 737  #define GFP_DMA32	__GFP_DMA32
 738  
 739 +/* Colored page requests */
 740 +#define GFP_COLOR	__GFP_COLOR
 741 +
 742  /* Convert GFP flags to their corresponding migrate type */
 743  static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 744  {
 745 diff --git include/linux/migrate.h include/linux/migrate.h
 746 index cac1c09..b16047b 100644
 747 --- include/linux/migrate.h
 748 +++ include/linux/migrate.h
 749 @@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
 750  			struct page *, struct page *, enum migrate_mode);
 751  extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 752  		unsigned long private, enum migrate_mode mode, int reason);
 753 +extern int replicate_pages(struct list_head *l, new_page_t new, free_page_t free,
 754 +		unsigned long private, enum migrate_mode mode, int reason);
 755  
 756  extern int migrate_prep(void);
 757  extern int migrate_prep_local(void);
 758 @@ -50,7 +52,11 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 759  		free_page_t free, unsigned long private, enum migrate_mode mode,
 760  		int reason)
 761  	{ return -ENOSYS; }
 762 -
 763 +static inline int replicate_pages(struct list_head *l, new_page_t new,
 764 +		free_page_t free, unsigned long private, enum migrate_mode mode,
 765 +		int reason)
 766 +	{ return -ENOSYS; }
 767 +	
 768  static inline int migrate_prep(void) { return -ENOSYS; }
 769  static inline int migrate_prep_local(void) { return -ENOSYS; }
 770  
 771 diff --git include/linux/mmzone.h include/linux/mmzone.h
 772 index 54d74f6..f010005 100644
 773 --- include/linux/mmzone.h
 774 +++ include/linux/mmzone.h
 775 @@ -35,6 +35,19 @@
 776   */
 777  #define PAGE_ALLOC_COSTLY_ORDER 3
 778  
  779 +/* For page coloring - This address decoding is used on the imx6-sabresd
  780 + * platform without bank interleaving.
 781 + */
 782 +#define BANK_MASK				0x38000000     
 783 +#define BANK_SHIFT 				27
 784 +#define CACHE_MASK 				0x0000f000      
 785 +#define CACHE_SHIFT				12
 786 +#define MAX_NUM_COLOR			16
 787 +#define MAX_NUM_BANK			8
 788 +//#define MAX_PARTITIONED_ORDER	3
 789 +#define MAX_PARTITIONED_ORDER	11
 790 +#define MAX_CONTIG_ORDER		3
 791 +
 792  enum {
 793  	MIGRATE_UNMOVABLE,
 794  	MIGRATE_RECLAIMABLE,
 795 @@ -157,6 +170,7 @@ enum zone_stat_item {
 796  	WORKINGSET_NODERECLAIM,
 797  	NR_ANON_TRANSPARENT_HUGEPAGES,
 798  	NR_FREE_CMA_PAGES,
 799 +	NR_FREE_HC_PAGES,
 800  	NR_VM_ZONE_STAT_ITEMS };
 801  
 802  /*
 803 @@ -476,7 +490,8 @@ struct zone {
 804  	ZONE_PADDING(_pad1_)
 805  	/* free areas of different sizes */
 806  	struct free_area	free_area[MAX_ORDER];
 807 -
 808 +	struct free_area	free_area_d[NR_CPUS][MAX_PARTITIONED_ORDER];
 809 +	
 810  	/* zone flags, see below */
 811  	unsigned long		flags;
 812  
 813 @@ -523,7 +538,9 @@ struct zone {
 814  	/* Set to true when the PG_migrate_skip bits should be cleared */
 815  	bool			compact_blockskip_flush;
 816  #endif
 817 -
 818 +	
 819 +	struct list_head	color_list[MAX_NUM_COLOR * MAX_NUM_BANK];
 820 +	DECLARE_BITMAP(color_map, MAX_NUM_COLOR*MAX_NUM_BANK);
 821  	ZONE_PADDING(_pad3_)
 822  	/* Zone statistics */
 823  	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 824 diff --git include/linux/rmap.h include/linux/rmap.h
 825 index c89c53a..7c90e02 100644
 826 --- include/linux/rmap.h
 827 +++ include/linux/rmap.h
 828 @@ -188,7 +188,8 @@ int page_referenced(struct page *, int is_locked,
 829  #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 830  
 831  int try_to_unmap(struct page *, enum ttu_flags flags);
 832 -
 833 +int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
 834 +		     unsigned long address, void *arg);
 835  /*
 836   * Used by uprobes to replace a userspace page safely
 837   */
 838 diff --git include/linux/slab.h include/linux/slab.h
 839 index ffd24c8..6064df0 100644
 840 --- include/linux/slab.h
 841 +++ include/linux/slab.h
 842 @@ -87,6 +87,8 @@
 843  # define SLAB_FAILSLAB		0x00000000UL
 844  #endif
 845  
 846 +#define SLAB_NO_MERGE		0x04000000UL	/* Do not merge with existing slab */
 847 +
 848  /* The following flags affect the page allocator grouping pages by mobility */
 849  #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 850  #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 851 @@ -417,6 +419,12 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 852   */
 853  static __always_inline void *kmalloc(size_t size, gfp_t flags)
 854  {
 855 +	
 856 +#ifdef CONFIG_SCHED_DEBUG_TRACE
 857 +	if (flags&GFP_COLOR)
 858 +		printk(KERN_INFO "kmalloc() is called with GFP_COLOR\n");
 859 +#endif
 860 +	
 861  	if (__builtin_constant_p(size)) {
 862  		if (size > KMALLOC_MAX_CACHE_SIZE)
 863  			return kmalloc_large(size, flags);
 864 diff --git include/linux/slub_def.h include/linux/slub_def.h
 865 index 3388511..9400aa1 100644
 866 --- include/linux/slub_def.h
 867 +++ include/linux/slub_def.h
 868 @@ -98,6 +98,8 @@ struct kmem_cache {
 869  	 */
 870  	int remote_node_defrag_ratio;
 871  #endif
 872 +	/* cpu id for higher-criticality slabs */
 873 +	int cpu_id;
 874  	struct kmem_cache_node *node[MAX_NUMNODES];
 875  };
 876  
 877 diff --git include/linux/vm_event_item.h include/linux/vm_event_item.h
 878 index 9246d32..3f5a9da 100644
 879 --- include/linux/vm_event_item.h
 880 +++ include/linux/vm_event_item.h
 881 @@ -23,7 +23,7 @@
 882  
 883  enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 884  		FOR_ALL_ZONES(PGALLOC),
 885 -		PGFREE, PGACTIVATE, PGDEACTIVATE,
 886 +		PGFREE, PGFREE_HC, PGACTIVATE, PGDEACTIVATE,
 887  		PGFAULT, PGMAJFAULT,
 888  		FOR_ALL_ZONES(PGREFILL),
 889  		FOR_ALL_ZONES(PGSTEAL_KSWAPD),
 890 diff --git include/linux/vmalloc.h include/linux/vmalloc.h
 891 index 0ec5983..2323fbe 100644
 892 --- include/linux/vmalloc.h
 893 +++ include/linux/vmalloc.h
 894 @@ -67,8 +67,11 @@ static inline void vmalloc_init(void)
 895  #endif
 896  
 897  extern void *vmalloc(unsigned long size);
 898 +extern void *vmalloc_color(unsigned long size);
 899  extern void *vzalloc(unsigned long size);
 900  extern void *vmalloc_user(unsigned long size);
 901 +extern void *vmalloc_color_user(unsigned long size);
 902 +extern void *vmalloc_color_user_cpu1(unsigned long size);
 903  extern void *vmalloc_node(unsigned long size, int node);
 904  extern void *vzalloc_node(unsigned long size, int node);
 905  extern void *vmalloc_exec(unsigned long size);
 906 diff --git include/linux/vmstat.h include/linux/vmstat.h
 907 index 82e7db7..b6410f7 100644
 908 --- include/linux/vmstat.h
 909 +++ include/linux/vmstat.h
 910 @@ -278,9 +278,12 @@ static inline void drain_zonestat(struct zone *zone,
 911  #endif		/* CONFIG_SMP */
 912  
 913  static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
 914 -					     int migratetype)
 915 +					     int migratetype, int part_no)
 916  {
 917 -	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
 918 +	if (part_no == NR_CPUS)
 919 +		__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
 920 +	else
 921 +		__mod_zone_page_state(zone, NR_FREE_HC_PAGES, nr_pages);
 922  	if (is_migrate_cma(migratetype))
 923  		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
 924  }
 925 diff --git include/litmus/budget.h include/litmus/budget.h
 926 index bd2d5c9..60eb814 100644
 927 --- include/litmus/budget.h
 928 +++ include/litmus/budget.h
 929 @@ -33,4 +33,6 @@ static inline int requeue_preempted_job(struct task_struct* t)
 930  		(!budget_exhausted(t) || !budget_enforced(t));
 931  }
 932  
 933 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining);
 934 +
 935  #endif
 936 diff --git include/litmus/cache_proc.h include/litmus/cache_proc.h
 937 new file mode 100644
 938 index 0000000..e9440de
 939 --- /dev/null
 940 +++ include/litmus/cache_proc.h
 941 @@ -0,0 +1,17 @@
 942 +#ifndef LITMUS_CACHE_PROC_H
 943 +#define LITMUS_CACHE_PROC_H
 944 +
 945 +#ifdef __KERNEL__
 946 +
 947 +void litmus_setup_lockdown(void __iomem*, u32);
 948 +void enter_irq_mode(void);
 949 +void exit_irq_mode(void);
 950 +void flush_cache(int all);
 951 +void lock_cache(int cpu, u32 val);
 952 +
 953 +extern struct page *new_alloc_page_color(unsigned long color);
 954 +
 955 +#endif
 956 +
 957 +#endif
 958 +
 959 diff --git include/litmus/litmus.h include/litmus/litmus.h
 960 index a6eb534..441210c 100644
 961 --- include/litmus/litmus.h
 962 +++ include/litmus/litmus.h
 963 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 964  	((current)->state == TASK_RUNNING || 	\
 965  	 preempt_count() & PREEMPT_ACTIVE)
 966  
 967 +#define is_running(t) 			\
 968 +	((t)->state == TASK_RUNNING || 	\
 969 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 970 +
 971 +#define is_blocked(t)       \
 972 +	(!is_running(t))
 973 +
 974  #define is_released(t, now)	\
 975  	(lt_before_eq(get_release(t), now))
 976  #define is_tardy(t, now)    \
 977 diff --git include/litmus/mc2_common.h include/litmus/mc2_common.h
 978 new file mode 100644
 979 index 0000000..e3c0af2
 980 --- /dev/null
 981 +++ include/litmus/mc2_common.h
 982 @@ -0,0 +1,31 @@
 983 +/*
 984 + * MC^2 common data structures
 985 + */
 986 + 
 987 +#ifndef __UNC_MC2_COMMON_H__
 988 +#define __UNC_MC2_COMMON_H__
 989 +
 990 +enum crit_level {
 991 +	CRIT_LEVEL_A = 0,
 992 +	CRIT_LEVEL_B = 1,
 993 +	CRIT_LEVEL_C = 2,
 994 +	NUM_CRIT_LEVELS = 3,
 995 +};
 996 +
 997 +struct mc2_task {
 998 +	enum crit_level crit;
 999 +	unsigned int res_id;
1000 +};
1001 +
1002 +#ifdef __KERNEL__
1003 +
1004 +#include <litmus/reservation.h>
1005 +
1006 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
1007 +
1008 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
1009 +							struct reservation *res);
1010 +	
1011 +#endif /* __KERNEL__ */
1012 +
1013 +#endif
1014 \ No newline at end of file
1015 diff --git include/litmus/page_dev.h include/litmus/page_dev.h
1016 new file mode 100644
1017 index 0000000..c6874ac
1018 --- /dev/null
1019 +++ include/litmus/page_dev.h
1020 @@ -0,0 +1,28 @@
1021 +/*
1022 + * page_dev.h - Implementation of the page coloring for cache and bank partition. 
1023 + * Author: Namhoon Kim (namhoonk@cs.unc.edu)
1024 + */
1025 + 
1026 +#ifndef _LITMUS_PAGE_DEV_H
1027 +#define _LITMUS_PAGE_DEV_H
1028 +
1029 +#include <linux/init.h>
1030 +#include <linux/types.h>
1031 +#include <linux/kernel.h>
1032 +#include <linux/module.h>
1033 +#include <linux/sysctl.h>
1034 +#include <linux/slab.h>
1035 +#include <linux/io.h>
1036 +#include <linux/mutex.h>
1037 +
1038 +#include <litmus/sched_trace.h>
1039 +#include <litmus/litmus.h>
1040 +
1041 +int llc_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos);
1042 +int dram_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos);
1043 +int bank_to_partition(unsigned int bank);
1044 +int get_area_index(int cpu);
1045 +int is_in_correct_bank(struct page* page, int cpu);
1046 +int is_in_llc_partition(struct page* page, int cpu);
1047 +
1048 +#endif /* _LITMUS_PAGE_DEV_H */
1049 \ No newline at end of file
1050 diff --git include/litmus/polling_reservations.h include/litmus/polling_reservations.h
1051 new file mode 100644
1052 index 0000000..66c9b1e
1053 --- /dev/null
1054 +++ include/litmus/polling_reservations.h
1055 @@ -0,0 +1,36 @@
1056 +#ifndef LITMUS_POLLING_RESERVATIONS_H
1057 +#define LITMUS_POLLING_RESERVATIONS_H
1058 +
1059 +#include <litmus/reservation.h>
1060 +
1061 +struct polling_reservation {
1062 +	/* extend basic reservation */
1063 +	struct reservation res;
1064 +
1065 +	lt_t max_budget;
1066 +	lt_t period;
1067 +	lt_t deadline;
1068 +	lt_t offset;
1069 +};
1070 +
1071 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
1072 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
1073 +
1074 +struct table_driven_reservation {
1075 +	/* extend basic reservation */
1076 +	struct reservation res;
1077 +
1078 +	lt_t major_cycle;
1079 +	unsigned int next_interval;
1080 +	unsigned int num_intervals;
1081 +	struct lt_interval *intervals;
1082 +
1083 +	/* info about current scheduling slot */
1084 +	struct lt_interval cur_interval;
1085 +	lt_t major_cycle_start;
1086 +};
1087 +
1088 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
1089 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
1090 +
1091 +#endif
1092 diff --git include/litmus/replicate_lib.h include/litmus/replicate_lib.h
1093 new file mode 100644
1094 index 0000000..13e0c3a
1095 --- /dev/null
1096 +++ include/litmus/replicate_lib.h
1097 @@ -0,0 +1,19 @@
1098 +#ifndef LITMUS_REPLICATE_LIB_H
1099 +#define LITMUS_REPLICATE_LIB_H
1100 +
1101 +#include <linux/list.h>
1102 +#include <linux/mm_types.h>
1103 +#include <linux/mm_inline.h>
1104 +
1105 +/* Data structure for the "master" list */
1106 +struct shared_lib_page {
1107 +	struct page *master_page;
1108 +	struct page *r_page[NR_CPUS+1];
1109 +	unsigned long int master_pfn;
1110 +	unsigned long int r_pfn[NR_CPUS+1];
1111 +	struct list_head list;
1112 +};
1113 +
1114 +extern struct list_head shared_lib_pages;
1115 +
1116 +#endif
1117 diff --git include/litmus/reservation.h include/litmus/reservation.h
1118 new file mode 100644
1119 index 0000000..7e022b3
1120 --- /dev/null
1121 +++ include/litmus/reservation.h
1122 @@ -0,0 +1,256 @@
1123 +#ifndef LITMUS_RESERVATION_H
1124 +#define LITMUS_RESERVATION_H
1125 +
1126 +#include <linux/list.h>
1127 +#include <linux/hrtimer.h>
1128 +
1129 +struct reservation_client;
1130 +struct reservation_environment;
1131 +struct reservation;
1132 +
1133 +typedef enum {
1134 +	/* reservation has no clients, is not consuming budget */
1135 +	RESERVATION_INACTIVE = 0,
1136 +
1137 +	/* reservation has clients, consumes budget when scheduled */
1138 +	RESERVATION_ACTIVE,
1139 +
1140 +	/* reservation has no clients, but may be consuming budget */
1141 +	RESERVATION_ACTIVE_IDLE,
1142 +
1143 +	/* Reservation has no budget and waits for
1144 +	 * replenishment. May or may not have clients. */
1145 +	RESERVATION_DEPLETED,
1146 +} reservation_state_t;
1147 +
1148 +
1149 +/* ************************************************************************** */
1150 +
1151 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
1152 + * to schedule right now and background work can be scheduled. */
1153 +typedef struct task_struct * (*dispatch_t)  (
1154 +	struct reservation_client *client
1155 +);
1156 +
1157 +/* Something that can be managed in a reservation and that can yield
1158 + * a process for dispatching. Contains a pointer to the reservation
1159 + * to which it "belongs". */
1160 +struct reservation_client {
1161 +	struct list_head list;
1162 +	struct reservation* reservation;
1163 +	dispatch_t dispatch;
1164 +};
1165 +
1166 +
1167 +/* ************************************************************************** */
1168 +
1169 +/* Called by reservations to request state change. */
1170 +typedef void (*reservation_change_state_t)  (
1171 +	struct reservation_environment* env,
1172 +	struct reservation *res,
1173 +	reservation_state_t new_state
1174 +);
1175 +
 1176 +/* The framework within which reservations operate. */
1177 +struct reservation_environment {
1178 +	lt_t time_zero;
1179 +	lt_t current_time;
1180 +
1181 +	/* services invoked by reservations */
1182 +	reservation_change_state_t change_state;
1183 +};
1184 +
1185 +
1186 +/* ************************************************************************** */
1187 +
1188 +/* A new client is added or an existing client resumes. */
1189 +typedef void (*client_arrives_t)  (
1190 +	struct reservation *reservation,
1191 +	struct reservation_client *client
1192 +);
1193 +
1194 +/* A client suspends or terminates. */
1195 +typedef void (*client_departs_t)  (
1196 +	struct reservation *reservation,
1197 +	struct reservation_client *client,
1198 +	int did_signal_job_completion
1199 +);
1200 +
1201 +/* A previously requested replenishment has occurred. */
1202 +typedef void (*on_replenishment_timer_t)  (
1203 +	struct reservation *reservation
1204 +);
1205 +
1206 +/* Update the reservation's budget to reflect execution or idling. */
1207 +typedef void (*drain_budget_t) (
1208 +	struct reservation *reservation,
1209 +	lt_t how_much
1210 +);
1211 +
1212 +/* Select a ready task from one of the clients for scheduling. */
1213 +typedef struct task_struct* (*dispatch_client_t)  (
1214 +	struct reservation *reservation,
1215 +	lt_t *time_slice /* May be used to force rescheduling after
1216 +	                    some amount of time. 0 => no limit */
1217 +);
1218 +
1219 +
1220 +struct reservation_ops {
1221 +	dispatch_client_t dispatch_client;
1222 +
1223 +	client_arrives_t client_arrives;
1224 +	client_departs_t client_departs;
1225 +
1226 +	on_replenishment_timer_t replenish;
1227 +	drain_budget_t drain_budget;
1228 +};
1229 +
1230 +struct reservation {
1231 +	/* used to queue in environment */
1232 +	struct list_head list;
1233 +
1234 +	reservation_state_t state;
1235 +	unsigned int id;
1236 +
1237 +	/* exact meaning defined by impl. */
1238 +	lt_t priority;
1239 +	lt_t cur_budget;
1240 +	lt_t next_replenishment;
1241 +
1242 +	/* budget stats */
1243 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
1244 +	lt_t budget_consumed_total;
1245 +
1246 +	/* interaction with framework */
1247 +	struct reservation_environment *env;
1248 +	struct reservation_ops *ops;
1249 +
1250 +	struct list_head clients;
1251 +	
1252 +	/* for global env. */
1253 +	int scheduled_on;
1254 +	int event_added;
1255 +	/* for blocked by ghost. Do not charge budget when ACTIVE */
1256 +	int blocked_by_ghost;
1257 +	/* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */
1258 +	int is_ghost;
1259 +};
1260 +
1261 +void reservation_init(struct reservation *res);
1262 +
1263 +/* Default implementations */
1264 +
1265 +/* simply select the first client in the list, set *for_at_most to zero */
1266 +struct task_struct* default_dispatch_client(
1267 +	struct reservation *res,
1268 +	lt_t *for_at_most
1269 +);
1270 +
1271 +/* "connector" reservation client to hook up tasks with reservations */
1272 +struct task_client {
1273 +	struct reservation_client client;
1274 +	struct task_struct *task;
1275 +};
1276 +
1277 +void task_client_init(struct task_client *tc, struct task_struct *task,
1278 +	struct reservation *reservation);
1279 +
1280 +#define SUP_RESCHEDULE_NOW (0)
1281 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
1282 +
1283 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
1284 + * environment.
1285 + */
1286 +struct sup_reservation_environment {
1287 +	struct reservation_environment env;
1288 +
1289 +	/* ordered by priority */
1290 +	struct list_head active_reservations;
1291 +
1292 +	/* ordered by next_replenishment */
1293 +	struct list_head depleted_reservations;
1294 +
1295 +	/* unordered */
1296 +	struct list_head inactive_reservations;
1297 +
1298 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
1299 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
1300 +	 * any other value means program a timer for the given time
1301 +	 */
1302 +	lt_t next_scheduler_update;
1303 +	/* set to true if a call to sup_dispatch() is imminent */
1304 +	bool will_schedule;
1305 +};
1306 +
1307 +/* Contract:
1308 + *  - before calling into sup_ code, or any reservation methods,
1309 + *    update the time with sup_update_time(); and
1310 + *  - after calling into sup_ code, or any reservation methods,
1311 + *    check next_scheduler_update and program timer or trigger
1312 + *    scheduler invocation accordingly.
1313 + */
1314 +
1315 +void sup_init(struct sup_reservation_environment* sup_env);
1316 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
1317 +	struct reservation* new_res);
1318 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
1319 +	lt_t timeout);
1320 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
1321 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
1322 +
1323 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
1324 +	unsigned int id);
1325 +	
1326 +/* A global multiprocessor reservation environment. */
1327 +
1328 +typedef enum {
1329 +	EVENT_REPLENISH = 0,
1330 +	EVENT_DRAIN,
1331 +	EVENT_OTHERS,
1332 +} event_type_t;
1333 +
1334 +
1335 +struct next_timer_event {
1336 +	lt_t next_update;
1337 +	int timer_armed_on;
1338 +	unsigned int id;
1339 +	event_type_t type;
1340 +	struct list_head list;
1341 +};
1342 +
1343 +struct gmp_reservation_environment {
1344 +	raw_spinlock_t lock;
1345 +	struct reservation_environment env;
1346 +
1347 +	/* ordered by priority */
1348 +	struct list_head active_reservations;
1349 +
1350 +	/* ordered by next_replenishment */
1351 +	struct list_head depleted_reservations;
1352 +
1353 +	/* unordered */
1354 +	struct list_head inactive_reservations;
1355 +
1356 +	/* timer event ordered by next_update */
1357 +	struct list_head next_events;
1358 +	
1359 +	/* (schedule_now == true) means call gmp_dispatch() now */
1360 +	int schedule_now;
1361 +	/* set to true if a call to gmp_dispatch() is imminent */
1362 +	bool will_schedule;
1363 +};
1364 +
1365 +void gmp_init(struct gmp_reservation_environment* gmp_env);
1366 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
1367 +	struct reservation* new_res);
1368 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
1369 +	lt_t timeout, unsigned int id, event_type_t type);
1370 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
1371 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
1372 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
1373 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
1374 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
1375 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
1376 +	unsigned int id);
1377 +
1378 +#endif
1379 diff --git include/litmus/rt_param.h include/litmus/rt_param.h
1380 index 7b9a909..56de045 100644
1381 --- include/litmus/rt_param.h
1382 +++ include/litmus/rt_param.h
1383 @@ -51,6 +51,16 @@ typedef enum {
1384  	TASK_EARLY
1385  } release_policy_t;
1386  
1387 +#ifdef CONFIG_PGMRT_SUPPORT
1388 +typedef enum {
1389 +    PGM_NOT_A_NODE,
1390 +    PGM_SRC,
1391 +    PGM_SINK,
1392 +    PGM_SRC_SINK,
1393 +    PGM_INTERNAL
1394 +} pgm_node_type_t;
1395 +#endif
1396 +
1397  /* We use the common priority interpretation "lower index == higher priority",
1398   * which is commonly used in fixed-priority schedulability analysis papers.
1399   * So, a numerically lower priority value implies higher scheduling priority,
1400 @@ -62,6 +72,7 @@ typedef enum {
1401  #define LITMUS_MAX_PRIORITY     512
1402  #define LITMUS_HIGHEST_PRIORITY   1
1403  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
1404 +#define LITMUS_NO_PRIORITY		UINT_MAX
1405  
1406  /* Provide generic comparison macros for userspace,
1407   * in case that we change this later. */
1408 @@ -71,6 +82,46 @@ typedef enum {
1409  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
1410  	 (p) <= LITMUS_LOWEST_PRIORITY)
1411  
1412 +/* reservation support */
1413 +
1414 +typedef enum {
1415 +	PERIODIC_POLLING,
1416 +	SPORADIC_POLLING,
1417 +	TABLE_DRIVEN,
1418 +} reservation_type_t;
1419 +
1420 +struct lt_interval {
1421 +	lt_t start;
1422 +	lt_t end;
1423 +};
1424 +
1425 +#ifndef __KERNEL__
1426 +#define __user
1427 +#endif
1428 +
1429 +struct reservation_config {
1430 +	unsigned int id;
1431 +	lt_t priority;
1432 +	int  cpu;
1433 +
1434 +	union {
1435 +		struct {
1436 +			lt_t period;
1437 +			lt_t budget;
1438 +			lt_t relative_deadline;
1439 +			lt_t offset;
1440 +		} polling_params;
1441 +
1442 +		struct {
1443 +			lt_t major_cycle_length;
1444 +			unsigned int num_intervals;
1445 +			struct lt_interval __user *intervals;
1446 +		} table_driven_params;
1447 +	};
1448 +};
1449 +
1450 +/* regular sporadic task support */
1451 +
1452  struct rt_task {
1453  	lt_t 		exec_cost;
1454  	lt_t 		period;
1455 @@ -81,6 +132,10 @@ struct rt_task {
1456  	task_class_t	cls;
1457  	budget_policy_t  budget_policy;  /* ignored by pfair */
1458  	release_policy_t release_policy;
1459 +#ifdef CONFIG_PGMRT_SUPPORT
1460 +	pgm_node_type_t	pgm_type;
1461 +	lt_t			pgm_expected_etoe;
1462 +#endif
1463  };
1464  
1465  union np_flag {
1466 @@ -121,6 +176,13 @@ struct control_page {
1467  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
1468  				     * started. */
1469  
1470 +#ifdef CONFIG_PGMRT_SUPPORT
1471 +    /* Flags from userspace signifying PGM wait states. */
1472 +    volatile uint32_t   pgm_waiting;    /* waiting for tokens */
1473 +    volatile uint32_t   pgm_sending;    /* sending tokens */
1474 +    volatile uint32_t   pgm_satisfied;  /* done waiting/sending */
1475 +#endif
1476 +
1477  	/* to be extended */
1478  };
1479  
1480 @@ -165,6 +227,7 @@ struct rt_job {
1481  };
1482  
1483  struct pfair_param;
1484 +struct mc2_task;
1485  
1486  /*	RT task parameters for scheduling extensions
1487   *	These parameters are inherited during clone and therefore must
1488 @@ -246,7 +309,10 @@ struct rt_param {
1489  	volatile int		linked_on;
1490  
1491  	/* PFAIR/PD^2 state. Allocated on demand. */
1492 -	struct pfair_param*	pfair;
1493 +	union {
1494 +		void *plugin_state;
1495 +		struct pfair_param *pfair;
1496 +	};
1497  
1498  	/* Fields saved before BE->RT transition.
1499  	 */
1500 @@ -275,6 +341,10 @@ struct rt_param {
1501  
1502  	/* Pointer to the page shared between userspace and kernel. */
1503  	struct control_page * ctrl_page;
1504 +
1505 +	/* Mixed-criticality specific data */
1506 +	struct mc2_task* mc2_data;
1507 +	unsigned long addr_ctrl_page;
1508  };
1509  
1510  #endif
1511 diff --git include/litmus/sched_plugin.h include/litmus/sched_plugin.h
1512 index 0ccccd6..4c8aaa6 100644
1513 --- include/litmus/sched_plugin.h
1514 +++ include/litmus/sched_plugin.h
1515 @@ -77,6 +77,17 @@ typedef long (*wait_for_release_at_t)(lt_t release_time);
1516  /* Informs the plugin when a synchronous release takes place. */
1517  typedef void (*synchronous_release_at_t)(lt_t time_zero);
1518  
1519 +/* How much budget has the current task consumed so far, and how much
1520 + * has it left? The default implementation ties into the per-task
1521 + * budget enforcement code. Plugins can override this to report
1522 + * reservation-specific values. */
1523 +typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
1524 +
1525 +/* Reservation creation/removal backends. Meaning of reservation_type and
 1526 + * reservation_id is entirely plugin-specific. */
1527 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
1528 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
1529 +
1530  /************************ misc routines ***********************/
1531  
1532  
1533 @@ -109,6 +120,12 @@ struct sched_plugin {
1534  	task_exit_t 		task_exit;
1535  	task_cleanup_t		task_cleanup;
1536  
1537 +	current_budget_t	current_budget;
1538 +
1539 +	/* Reservation support */
1540 +	reservation_create_t	reservation_create;
1541 +	reservation_destroy_t	reservation_destroy;
1542 +
1543  #ifdef CONFIG_LITMUS_LOCKING
1544  	/*	locking protocols	*/
1545  	allocate_lock_t		allocate_lock;
1546 diff --git include/litmus/trace.h include/litmus/trace.h
1547 index 6017872..7d36a11 100644
1548 --- include/litmus/trace.h
1549 +++ include/litmus/trace.h
1550 @@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
1551  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
1552  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
1553  
1554 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
1555 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
1556 +
1557  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
1558  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
1559  
1560 @@ -137,6 +140,27 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
1561  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
1562  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
1563  
1564 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
1565 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
1566 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
1567 +
1568 +/* For RTAS2018 */
1569 +#define TS_NET_RX_HARDIRQ_START		CPU_TIMESTAMP_CUR(194)
1570 +#define TS_NET_RX_HARDIRQ_END		CPU_TIMESTAMP_CUR(195)
1571 +
1572 +#define TS_NET_RX_SOFTIRQ_START		CPU_TIMESTAMP_CUR(196)
1573 +#define TS_NET_RX_SOFTIRQ_END		CPU_TIMESTAMP_CUR(197)
1574 +
1575 +#define TS_UVC_IRQ_START			CPU_TIMESTAMP_CUR(198)
1576 +#define TS_UVC_IRQ_END				CPU_TIMESTAMP_CUR(199)
1577 +
1578 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
1579 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
1580 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
1581 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
1582 +
1583 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
1584 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
1585 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
1586 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
1587  
1588  #endif /* !_SYS_TRACE_H_ */
1589 diff --git include/litmus/unistd_32.h include/litmus/unistd_32.h
1590 index 94264c2..86bbbb8d 100644
1591 --- include/litmus/unistd_32.h
1592 +++ include/litmus/unistd_32.h
1593 @@ -17,5 +17,13 @@
1594  #define __NR_wait_for_ts_release __LSC(9)
1595  #define __NR_release_ts		__LSC(10)
1596  #define __NR_null_call		__LSC(11)
1597 +#define __NR_get_current_budget __LSC(12)
1598 +#define __NR_reservation_create	__LSC(13)
1599 +#define __NR_reservation_destroy	__LSC(14)
1600 +#define __NR_set_mc2_task_param	__LSC(15)
1601 +#define __NR_set_page_color		__LSC(16)
1602 +#define __NR_test_call		__LSC(17)
1603 +#define __NR_run_test		__LSC(18)
1604 +#define __NR_lock_buffer	__LSC(19)
1605  
1606 -#define NR_litmus_syscalls 12
1607 +#define NR_litmus_syscalls	20
1608 diff --git include/litmus/unistd_64.h include/litmus/unistd_64.h
1609 index d5ced0d..4b96e7c 100644
1610 --- include/litmus/unistd_64.h
1611 +++ include/litmus/unistd_64.h
1612 @@ -29,5 +29,22 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
1613  __SYSCALL(__NR_release_ts, sys_release_ts)
1614  #define __NR_null_call				__LSC(11)
1615  __SYSCALL(__NR_null_call, sys_null_call)
1616 +#define __NR_get_current_budget			__LSC(12)
1617 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
1618 +#define __NR_reservation_create		__LSC(13)
1619 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
1620 +#define __NR_reservation_destroy	__LSC(14)
1621 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
1622 +#define __NR_set_mc2_task_param		__LSC(15)
1623 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
1624 +#define __NR_set_page_color			__LSC(16)
1625 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
1626 +#define __NR_test_call				__LSC(17)
1627 +__SYSCALL(__NR_test_call, sys_test_call)
1628 +#define __NR_run_test				__LSC(18)
1629 +__SYSCALL(__NR_run_test, sys_run_test)
1630 +#define __NR_lock_buffer			__LSC(19)
1631 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
1632  
1633 -#define NR_litmus_syscalls 12
1634 +
1635 +#define NR_litmus_syscalls 20
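/*
 * A minimal userspace sketch (not part of the patch) that exercises the new
 * get_current_budget entry point directly via syscall(2).  The numeric value
 * of __NR_get_current_budget must be supplied at build time, e.g. with
 * -D__NR_get_current_budget=<number>, because __LSC(12) above resolves
 * against the architecture's LITMUS^RT syscall base.  liblitmus would
 * normally wrap this call.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_get_current_budget
#error "define __NR_get_current_budget to the value derived from these headers"
#endif

int main(void)
{
	uint64_t used = 0, remaining = 0;	/* lt_t is a 64-bit time value */

	if (syscall(__NR_get_current_budget, &used, &remaining) != 0) {
		perror("get_current_budget");
		return 1;
	}
	printf("used=%llu ns  remaining=%llu ns\n",
	       (unsigned long long)used, (unsigned long long)remaining);
	return 0;
}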
1636 diff --git kernel/irq/handle.c kernel/irq/handle.c
1637 index 6354802..81df1b2 100644
1638 --- kernel/irq/handle.c
1639 +++ kernel/irq/handle.c
1640 @@ -18,6 +18,9 @@
1641  
1642  #include <trace/events/irq.h>
1643  
1644 +#include <litmus/litmus.h>
1645 +#include <litmus/trace.h>
1646 +
1647  #include "internals.h"
1648  
1649  /**
1650 @@ -138,11 +141,9 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
1651  
1652  	do {
1653  		irqreturn_t res;
1654 -
1655  		trace_irq_handler_entry(irq, action);
1656  		res = action->handler(irq, action->dev_id);
1657  		trace_irq_handler_exit(irq, action, res);
1658 -
1659  		if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
1660  			      irq, action->handler))
1661  			local_irq_disable();
1662 diff --git kernel/sched/litmus.c kernel/sched/litmus.c
1663 index 9d58690..cd36358 100644
1664 --- kernel/sched/litmus.c
1665 +++ kernel/sched/litmus.c
1666 @@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
1667  	/* task counter */
1668  	p->se.sum_exec_runtime += delta;
1669  	if (delta) {
1670 -		TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1671 -			delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1672 +		//TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1673 +			//delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1674 +		;
1675  	}
1676  	/* sched_clock() */
1677  	p->se.exec_start = rq->clock;
1678 diff --git kernel/softirq.c kernel/softirq.c
1679 index 99fe8b8..161450e 100644
1680 --- kernel/softirq.c
1681 +++ kernel/softirq.c
1682 @@ -27,6 +27,9 @@
1683  #include <linux/tick.h>
1684  #include <linux/irq.h>
1685  
1686 +/* for measuring NET_RX bottom half */
1687 +#include <litmus/trace.h>
1688 +
1689  #define CREATE_TRACE_POINTS
1690  #include <trace/events/irq.h>
1691  
1692 diff --git litmus/Makefile litmus/Makefile
1693 index 7970cd5..ccd532d 100644
1694 --- litmus/Makefile
1695 +++ litmus/Makefile
1696 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
1697  	    sync.o \
1698  	    rt_domain.o \
1699  	    edf_common.o \
1700 +		mc2_common.o \
1701  	    fp_common.o \
1702  	    fdso.o \
1703  	    locking.o \
1704 @@ -19,13 +20,21 @@ obj-y     = sched_plugin.o litmus.o \
1705  	    binheap.o \
1706  	    ctrldev.o \
1707  	    uncachedev.o \
1708 +		reservation.o \
1709 +		polling_reservations.o \
1710  	    sched_gsn_edf.o \
1711  	    sched_psn_edf.o \
1712 -	    sched_pfp.o
1713 +	    sched_pfp.o \
1714 +		sched_mc2.o \
1715 +		bank_proc.o \
1716 +	    color_shm.o \
1717 +		replicate_lib.o \
1718 +		cache_proc.o \
1719 +		page_dev.o \
1720 +		fakedev0.o 
1721  
1722  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
1723  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
1724 -
1725  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
1726  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
1727  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
1728 diff --git litmus/bank_proc.c litmus/bank_proc.c
1729 new file mode 100644
1730 index 0000000..353d38d
1731 --- /dev/null
1732 +++ litmus/bank_proc.c
1733 @@ -0,0 +1,773 @@
1734 +/*
1735 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
1736 + *                The file keeps a pool of colored pages. Users can request pages with a
1737 + *                specific color or bank number.
1738 + *                Part of the code is adapted from Jonathan Herman's code.
1739 + */
1740 +#include <linux/init.h>
1741 +#include <linux/types.h>
1742 +#include <linux/kernel.h>
1743 +#include <linux/module.h>
1744 +#include <linux/sysctl.h>
1745 +#include <linux/slab.h>
1746 +#include <linux/io.h>
1747 +#include <linux/mutex.h>
1748 +#include <linux/mm.h>
1749 +#include <linux/random.h>
1750 +
1751 +#include <litmus/litmus_proc.h>
1752 +#include <litmus/sched_trace.h>
1753 +#include <litmus/litmus.h>
1754 +
1755 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
1756 +
1757 +// This Address Decoding is used in imx6-sabredsd platform
1758 +#define BANK_MASK  0x38000000     
1759 +#define BANK_SHIFT  27
1760 +#define CACHE_MASK  0x0000f000      
1761 +#define CACHE_SHIFT 12
1762 +
1763 +#define PAGES_PER_COLOR 2000
1764 +#define NUM_BANKS	8
1765 +#define NUM_COLORS	16
1766 +
1767 +unsigned int NUM_PAGE_LIST;  //8*16
1768 +
1769 +unsigned int number_banks;
1770 +unsigned int number_cachecolors;
1771 +
1772 +unsigned int set_partition_max = 0x0000ffff;
1773 +unsigned int set_partition_min = 0;
1774 +unsigned int bank_partition_max = 0x000000ff;
1775 +unsigned int bank_partition_min = 0;
1776 +
1777 +int show_page_pool = 0;
1778 +int refill_page_pool = 0;
1779 +spinlock_t reclaim_lock;
1780 +
1781 +unsigned int set_partition[9] = {
1782 +        0x00000003,  /* Core 0, and Level A*/
1783 +        0x00000003,  /* Core 0, and Level B*/
1784 +        0x0000000C,  /* Core 1, and Level A*/
1785 +        0x0000000C,  /* Core 1, and Level B*/
1786 +        0x00000030,  /* Core 2, and Level A*/
1787 +        0x00000030,  /* Core 2, and Level B*/
1788 +        0x000000C0,  /* Core 3, and Level A*/
1789 +        0x000000C0,  /* Core 3, and Level B*/
1790 +        0x0000ff00,  /* Level C */
1791 +};
1792 +
1793 +unsigned int bank_partition[9] = {
1794 +        0x00000010,  /* Core 0, and Level A*/
1795 +        0x00000010,  /* Core 0, and Level B*/
1796 +        0x00000020,  /* Core 1, and Level A*/
1797 +        0x00000020,  /* Core 1, and Level B*/
1798 +        0x00000040,  /* Core 2, and Level A*/
1799 +        0x00000040,  /* Core 2, and Level B*/
1800 +        0x00000080,  /* Core 3, and Level A*/
1801 +        0x00000080,  /* Core 3, and Level B*/
1802 +        0x0000000c,  /* Level C */
1803 +};
1804 +
1805 +unsigned int set_index[9] = {
1806 +    0, 0, 0, 0, 0, 0, 0, 0, 0
1807 +};
1808 +
1809 +unsigned int bank_index[9] = {
1810 +    0, 0, 0, 0, 0, 0, 0, 0, 0
1811 +};
1812 +
1813 +int node_index[9] = {
1814 +    -1, -1, -1, -1, -1, -1, -1, -1, -1
1815 +};
1816 +
1817 +struct mutex void_lockdown_proc;
1818 +
1819 +/*
1820 + * Every page list contains a lock, a list head, and a counter recording how many pages it stores.
1821 + */ 
1822 +struct color_group {
1823 +	spinlock_t lock;
1824 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
1825 +	struct list_head list;
1826 +	atomic_t nr_pages;
1827 +};
1828 +
1829 +
1830 +static struct color_group *color_groups;
1831 +
1832 +/*
1833 + * Naive function to count the number of 1's
1834 + */
1835 +unsigned int counting_one_set(unsigned int v)
1836 +{
1837 +    unsigned int c; // c accumulates the total bits set in v
1838 +
1839 +    for (c = 0; v; v >>= 1)
1840 +    {
1841 +        c += v & 1;
1842 +    }
1843 +    return c;
1844 +}
1845 +
1846 +unsigned int two_exp(unsigned int e)
1847 +{
1848 +    unsigned int v = 1;
1849 +    for (; e>0; e-- )
1850 +    {
1851 +        v=v*2;
1852 +    }
1853 +    return v;
1854 +}
1855 +
1856 +/* helper functions to find the next colored pool index */
1857 +static inline unsigned int first_index(unsigned long node)
1858 +{
1859 +	unsigned int bank_no = 0, color_no = 0;
1860 +	
1861 +	while(bank_no < NUM_BANKS) {
1862 +		if ((bank_partition[node]>>bank_no) & 0x1)
1863 +			break;
1864 +		bank_no++;
1865 +	}
1866 +	while(color_no < NUM_COLORS) {
1867 +		if ((set_partition[node]>>color_no) & 0x1)
1868 +			break;
1869 +		color_no++;
1870 +	}
1871 +	return NUM_COLORS*bank_no + color_no; 
1872 +}
1873 +
1874 +static inline unsigned int last_index(unsigned long node)
1875 +{
1876 +	int bank_no = NUM_BANKS-1, color_no = NUM_COLORS-1; /* signed so the >= 0 loops below terminate */
1877 +	
1878 +	while(bank_no >= 0) {
1879 +		if ((bank_partition[node]>>bank_no) & 0x1)
1880 +			break;
1881 +		bank_no--;
1882 +	}
1883 +	while(color_no >= 0) {
1884 +		if ((set_partition[node]>>color_no) & 0x1)
1885 +			break;
1886 +		color_no--;
1887 +	}
1888 +	return NUM_COLORS*bank_no + color_no; 
1889 +}
1890 +
1891 +static inline unsigned int next_color(unsigned long node, unsigned int current_color)
1892 +{
1893 +	int try = 0, ret = 0;
1894 +	current_color++;
1895 +	if (current_color == NUM_COLORS) {
1896 +		current_color = 0;
1897 +		ret = 1;
1898 +	}
1899 +	
1900 +	while (try < NUM_COLORS) {
1901 +		if ((set_partition[node]>>current_color)&0x1)
1902 +			break;
1903 +		current_color++;
1904 +		if (current_color == NUM_COLORS) {
1905 +			current_color = 0;
1906 +			ret = 1;
1907 +		}
1908 +		try++;
1909 +	}
1910 +	if (!ret)
1911 +		return current_color;
1912 +	else
1913 +		return current_color + NUM_COLORS;
1914 +}
1915 +
1916 +static inline unsigned int next_bank(unsigned long node, unsigned int current_bank)
1917 +{
1918 +	int try = 0;
1919 +	current_bank++;
1920 +	if (current_bank == NUM_BANKS) {
1921 +		current_bank = 0;
1922 +	}
1923 +	
1924 +	while (try < NUM_BANKS) {
1925 +		if ((bank_partition[node]>>current_bank)&0x1)
1926 +			break;
1927 +		current_bank++;
1928 +		if (current_bank == NUM_BANKS) {
1929 +			current_bank = 0;
1930 +		}
1931 +		try++;
1932 +	}
1933 +	return current_bank;
1934 +}
1935 +
1936 +static inline unsigned int get_next_index(unsigned long node, unsigned int current_index)
1937 +{
1938 +	unsigned int bank_no, color_no, color_ret, bank_ret;
1939 +	bank_no = current_index>>4; // 2^4 = 16 colors
1940 +	color_no = current_index - bank_no*NUM_COLORS;
1941 +	bank_ret = bank_no;
1942 +	color_ret = next_color(node, color_no);
1943 +	if (color_ret >= NUM_COLORS) {
1944 +		// next bank
1945 +		color_ret -= NUM_COLORS;
1946 +		bank_ret = next_bank(node, bank_no);
1947 +	}
1948 +
1949 +	return bank_ret * NUM_COLORS + color_ret;
1950 +}
1951 +
1952 +/* Decoding page color, 0~15 */ 
1953 +static inline unsigned int page_color(struct page *page)
1954 +{
1955 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
1956 +}
1957 +
1958 +/* Decoding page bank number, 0~7 */ 
1959 +static inline unsigned int page_bank(struct page *page)
1960 +{
1961 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
1962 +}
1963 +
1964 +static inline unsigned int page_list_index(struct page *page)
1965 +{
1966 +    unsigned int idx;  
1967 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
1968 +
1969 +    return idx; 
1970 +}
1971 +
1972 +
1973 +
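/*
 * Worked example of the decoding above, using the imx6 masks defined at the
 * top of this file: for a page at physical address 0x3A804000,
 *   page_bank()       = (0x3A804000 & 0x38000000) >> 27 = 7
 *   page_color()      = (0x3A804000 & 0x0000f000) >> 12 = 4
 *   page_list_index() = 4 + 7 * 16 = 116
 * i.e. the page lives on list 116 of NUM_PAGE_LIST = 8 * 16 = 128 lists.
 */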
1974 +/*
1975 + * Return the smallest number of pages held by any page list (lists below index 16*2 are skipped).
1976 + */
1977 +static unsigned long smallest_nr_pages(void)
1978 +{
1979 +	unsigned long i, min_pages;
1980 +	struct color_group *cgroup;
1981 +	cgroup = &color_groups[16*2];
1982 +	min_pages =atomic_read(&cgroup->nr_pages); 
1983 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
1984 +		cgroup = &color_groups[i];
1985 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
1986 +			min_pages = atomic_read(&cgroup->nr_pages);
1987 +	}
1988 +	return min_pages;
1989 +}
1990 +
1991 +static void show_nr_pages(void)
1992 +{
1993 +	unsigned long i;
1994 +	struct color_group *cgroup;
1995 +	printk("show nr pages***************************************\n");
1996 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
1997 +		cgroup = &color_groups[i];
1998 +		printk("(%03ld) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
1999 +		if((i % 8) ==7) {
2000 +		    printk("\n");
2001 +		}
2002 +	}
2003 +}
2004 +
2005 +/*
2006 + * Add a page to the pool matching its color and bank.
2007 + */
2008 +void add_page_to_color_list(struct page *page)
2009 +{
2010 +	const unsigned long color = page_list_index(page);
2011 +	struct color_group *cgroup = &color_groups[color];
2012 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
2013 +	BUG_ON(page_count(page) > 1);
2014 +	spin_lock(&cgroup->lock);
2015 +	list_add_tail(&page->lru, &cgroup->list);
2016 +	atomic_inc(&cgroup->nr_pages);
2017 +	SetPageLRU(page);
2018 +	spin_unlock(&cgroup->lock);
2019 +}
2020 +
2021 +/*
2022 + * Replenish the page pool. 
2023 + * If a newly allocated page is still needed, it is pushed onto the matching page list;
2024 + * otherwise, it is freed.
2025 + * A user needs to invoke this function until the page pool has enough pages.
2026 + */
2027 +static int do_add_pages(void)
2028 +{
2029 +	struct page *page, *page_tmp;
2030 +	LIST_HEAD(free_later);
2031 +	unsigned long color;
2032 +	int ret = 0;
2033 +	int i = 0;
2034 +	int free_counter = 0;
2035 +	unsigned long counter[128]= {0}; 
2036 +        
2037 +	// until all the page lists contain enough pages 
2038 +	for (i=0; i< 1024*20;i++) {
2039 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
2040 +	
2041 +		if (unlikely(!page)) {
2042 +			printk(KERN_WARNING "Could not allocate pages.\n");
2043 +			ret = -ENOMEM;
2044 +			goto out;
2045 +		}
2046 +		color = page_list_index(page);
2047 +		counter[color]++;
2048 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=0) {
2049 +			add_page_to_color_list(page);
2050 +		} else {
2051 +			// Pages here will be freed later 
2052 +			list_add_tail(&page->lru, &free_later);
2053 +			free_counter++;
2054 +		}
2055 +	}
2056 +
2057 +	// Free the unwanted pages
2058 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
2059 +		list_del(&page->lru);
2060 +		__free_page(page);
2061 +	}
2062 +out:
2063 +        return ret;
2064 +}
2065 +
2066 +/*
2067 + * Provide pages for replacement according to the requested cache color.
2068 + * This should be the only allocation implementation here;
2069 + * other code should not call this function directly.
2070 + * 
2071 + */ 
2072 +static struct page *new_alloc_page_color( unsigned long color)
2073 +{
2074 +//	printk("allocate new page color = %d\n", color);	
2075 +	struct color_group *cgroup;
2076 +	struct page *rPage = NULL;
2077 +		
2078 +	if( (color <0) || (color)>(number_cachecolors*number_banks -1)) {
2079 +		TRACE_CUR("Wrong color %lu\n", color);	
2080 +		goto out;
2081 +	}
2082 +
2083 +		
2084 +	cgroup = &color_groups[color];
2085 +	spin_lock(&cgroup->lock);
2086 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
2087 +		TRACE_CUR("No free %lu colored pages.\n", color);
2088 +		goto out_unlock;
2089 +	}
2090 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
2091 +	BUG_ON(page_count(rPage) > 1);
2092 +	//get_page(rPage);
2093 +	list_del(&rPage->lru);
2094 +	atomic_dec(&cgroup->nr_pages);
2095 +	ClearPageLRU(rPage);
2096 +out_unlock:
2097 +	spin_unlock(&cgroup->lock);
2098 +out:
2099 +	return rPage;
2100 +}
2101 +
2102 +struct page* get_colored_page(unsigned long color)
2103 +{
2104 +	return new_alloc_page_color(color);
2105 +}
2106 +
2107 +/*
2108 + * provide pages for replacement according to  
2109 + * node = 0 for Level A tasks in Cpu 0
2110 + * node = 1 for Level B tasks in Cpu 0
2111 + * node = 2 for Level A tasks in Cpu 1
2112 + * node = 3 for Level B tasks in Cpu 1
2113 + * node = 4 for Level A tasks in Cpu 2
2114 + * node = 5 for Level B tasks in Cpu 2
2115 + * node = 6 for Level A tasks in Cpu 3
2116 + * node = 7 for Level B tasks in Cpu 3
2117 + * node = 8 for Level C tasks 
2118 + */
2119 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
2120 +{
2121 +	struct page *rPage = NULL;
2122 +	int try = 0;
2123 +	unsigned int idx;
2124 +	
2125 +	if (node_index[node] == -1)
2126 +		idx = first_index(node);
2127 +	else
2128 +		idx = node_index[node];
2129 +	
2130 +	BUG_ON(idx<0 || idx>127);
2131 +	rPage =  new_alloc_page_color(idx);
2132 +	if (node_index[node] == last_index(node))
2133 +		node_index[node] = first_index(node);
2134 +	else
2135 +		node_index[node]++;
2136 +
2137 +	while (!rPage)  {
2138 +		try++;
2139 +		if (try>=256)
2140 +			break;
2141 +		idx = get_next_index(node, idx);
2142 +		printk(KERN_ALERT "try = %d, out of pages! requesting node = %lu, idx = %d\n", try, node, idx);
2143 +		BUG_ON(idx<0 || idx>127);
2144 +		rPage = new_alloc_page_color(idx);
2145 +	}
2146 +	node_index[node] = idx;
2147 +	return rPage; 
2148 +}
2149 +
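/*
 * A minimal sketch of the node encoding documented above (assuming
 * <litmus/mc2_common.h> for enum crit_level); the helper name is
 * hypothetical and only illustrates how a caller such as the MC2 plugin
 * could map a (cpu, criticality level) pair onto new_alloc_page()'s node
 * argument.
 */
static inline unsigned long crit_to_node(int cpu, enum crit_level lv)
{
	if (lv == CRIT_LEVEL_C)
		return 8;			/* shared Level-C pool */
	return cpu * 2 + (lv == CRIT_LEVEL_B ? 1 : 0);
}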
2150 +
2151 +/*
2152 + * Reclaim pages.
2153 + */
2154 +void reclaim_page(struct page *page)
2155 +{
2156 +	const unsigned long color = page_list_index(page);
2157 +	spin_lock(&reclaim_lock);
2158 +    	put_page(page);
2159 +	add_page_to_color_list(page);
2160 +
2161 +	spin_unlock(&reclaim_lock);
2162 +	printk("Reclaimed page(%ld) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
2163 +}
2164 +
2165 +
2166 +/*
2167 + * Initialize the numbers of banks and cache colors 
2168 + */ 
2169 +static void __init init_variables(void)
2170 +{
2171 +	number_banks = counting_one_set(BANK_MASK); 
2172 +	number_banks = two_exp(number_banks); 
2173 +
2174 +	number_cachecolors = counting_one_set(CACHE_MASK);
2175 +	number_cachecolors = two_exp(number_cachecolors);
2176 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
2177 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
2178 +	mutex_init(&void_lockdown_proc);
2179 +	spin_lock_init(&reclaim_lock);
2180 +
2181 +}
2182 +
2183 +
2184 +/*
2185 + * Initialize the page pool 
2186 + */
2187 +static int __init init_color_groups(void)
2188 +{
2189 +	struct color_group *cgroup;
2190 +	unsigned long i;
2191 +	int err = 0;
2192 +
2193 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
2194 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
2195 +
2196 +	if (!color_groups) {
2197 +		printk(KERN_WARNING "Could not allocate color groups.\n");
2198 +		err = -ENOMEM;
2199 +	}else{
2200 +
2201 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
2202 +			cgroup = &color_groups[i];
2203 +			atomic_set(&cgroup->nr_pages, 0);
2204 +			INIT_LIST_HEAD(&cgroup->list);
2205 +			spin_lock_init(&cgroup->lock);
2206 +		}
2207 +	}
2208 +        return err;
2209 +}
2210 +
2211 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2212 +		size_t *lenp, loff_t *ppos)
2213 +{
2214 +	int ret = 0, i = 0;
2215 +	mutex_lock(&void_lockdown_proc);
2216 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2217 +	if (ret)
2218 +		goto out;
2219 +	if (write) {
2220 +            printk("New set Partition : \n");
2221 +	    for(i =0;i <9;i++)
2222 +            {
2223 +                set_index[i] = 0;
2224 +                printk("set[%d] = %x \n", i, set_partition[i]);
2225 +            }
2226 +	}
2227 +out:
2228 +	mutex_unlock(&void_lockdown_proc);
2229 +	return ret;
2230 +}
2231 +
2232 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2233 +		size_t *lenp, loff_t *ppos)
2234 +{
2235 +	int ret = 0, i = 0;
2236 +	mutex_lock(&void_lockdown_proc);
2237 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2238 +	if (ret)
2239 +		goto out;
2240 +	if (write) {
2241 +	    for(i =0;i <9;i++)
2242 +            {
2243 +                bank_index[i] = 0;
2244 +            }
2245 +	}
2246 +out:
2247 +	mutex_unlock(&void_lockdown_proc);
2248 +	return ret;
2249 +}
2250 +
2251 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
2252 +		size_t *lenp, loff_t *ppos)
2253 +{
2254 +	int ret = 0;
2255 +	mutex_lock(&void_lockdown_proc);
2256 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2257 +	if (ret)
2258 +		goto out;
2259 +	if (write) {
2260 +            show_nr_pages();
2261 +	}
2262 +out:
2263 +	mutex_unlock(&void_lockdown_proc);
2264 +	return ret;
2265 +}
2266 +
2267 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
2268 +		size_t *lenp, loff_t *ppos)
2269 +{
2270 +	int ret = 0;
2271 +	mutex_lock(&void_lockdown_proc);
2272 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2273 +	if (ret)
2274 +		goto out;
2275 +	if (write) {
2276 +            do_add_pages();
2277 +			show_nr_pages();
2278 +	}
2279 +out:
2280 +	mutex_unlock(&void_lockdown_proc);
2281 +	return ret;
2282 +}
2283 +
2284 +static struct ctl_table cache_table[] =
2285 +{
2286 +        
2287 +	{
2288 +		.procname	= "C0_LA_set",
2289 +		.mode		= 0666,
2290 +		.proc_handler	= set_partition_handler,
2291 +		.data		= &set_partition[0],
2292 +		.maxlen		= sizeof(set_partition[0]),
2293 +		.extra1		= &set_partition_min,
2294 +		.extra2		= &set_partition_max,
2295 +	},	
2296 +	{
2297 +		.procname	= "C0_LB_set",
2298 +		.mode		= 0666,
2299 +		.proc_handler	= set_partition_handler,
2300 +		.data		= &set_partition[1],
2301 +		.maxlen		= sizeof(set_partition[1]),
2302 +		.extra1		= &set_partition_min,
2303 +		.extra2		= &set_partition_max,
2304 +	},	
2305 +	{
2306 +		.procname	= "C1_LA_set",
2307 +		.mode		= 0666,
2308 +		.proc_handler	= set_partition_handler,
2309 +		.data		= &set_partition[2],
2310 +		.maxlen		= sizeof(set_partition[2]),
2311 +		.extra1		= &set_partition_min,
2312 +		.extra2		= &set_partition_max,
2313 +	},
2314 +	{
2315 +		.procname	= "C1_LB_set",
2316 +		.mode		= 0666,
2317 +		.proc_handler	= set_partition_handler,
2318 +		.data		= &set_partition[3],
2319 +		.maxlen		= sizeof(set_partition[3]),
2320 +		.extra1		= &set_partition_min,
2321 +		.extra2		= &set_partition_max,
2322 +	},
2323 +	{
2324 +		.procname	= "C2_LA_set",
2325 +		.mode		= 0666,
2326 +		.proc_handler	= set_partition_handler,
2327 +		.data		= &set_partition[4],
2328 +		.maxlen		= sizeof(set_partition[4]),
2329 +		.extra1		= &set_partition_min,
2330 +		.extra2		= &set_partition_max,
2331 +	},
2332 +	{
2333 +		.procname	= "C2_LB_set",
2334 +		.mode		= 0666,
2335 +		.proc_handler	= set_partition_handler,
2336 +		.data		= &set_partition[5],
2337 +		.maxlen		= sizeof(set_partition[5]),
2338 +		.extra1		= &set_partition_min,
2339 +		.extra2		= &set_partition_max,
2340 +	},
2341 +	{
2342 +		.procname	= "C3_LA_set",
2343 +		.mode		= 0666,
2344 +		.proc_handler	= set_partition_handler,
2345 +		.data		= &set_partition[6],
2346 +		.maxlen		= sizeof(set_partition[6]),
2347 +		.extra1		= &set_partition_min,
2348 +		.extra2		= &set_partition_max,
2349 +	},
2350 +	{
2351 +		.procname	= "C3_LB_set",
2352 +		.mode		= 0666,
2353 +		.proc_handler	= set_partition_handler,
2354 +		.data		= &set_partition[7],
2355 +		.maxlen		= sizeof(set_partition[7]),
2356 +		.extra1		= &set_partition_min,
2357 +		.extra2		= &set_partition_max,
2358 +	},	
2359 +	{
2360 +		.procname	= "Call_LC_set",
2361 +		.mode		= 0666,
2362 +		.proc_handler	= set_partition_handler,
2363 +		.data		= &set_partition[8],
2364 +		.maxlen		= sizeof(set_partition[8]),
2365 +		.extra1		= &set_partition_min,
2366 +		.extra2		= &set_partition_max,
2367 +	},	
2368 +	{
2369 +		.procname	= "C0_LA_bank",
2370 +		.mode		= 0666,
2371 +		.proc_handler	= bank_partition_handler,
2372 +		.data		= &bank_partition[0],
2373 +		.maxlen		= sizeof(set_partition[0]),
2374 +		.extra1		= &bank_partition_min,
2375 +		.extra2		= &bank_partition_max,
2376 +	},
2377 +	{
2378 +		.procname	= "C0_LB_bank",
2379 +		.mode		= 0666,
2380 +		.proc_handler	= bank_partition_handler,
2381 +		.data		= &bank_partition[1],
2382 +		.maxlen		= sizeof(set_partition[1]),
2383 +		.extra1		= &bank_partition_min,
2384 +		.extra2		= &bank_partition_max,
2385 +	},		
2386 +	{
2387 +		.procname	= "C1_LA_bank",
2388 +		.mode		= 0666,
2389 +		.proc_handler	= bank_partition_handler,
2390 +		.data		= &bank_partition[2],
2391 +		.maxlen		= sizeof(set_partition[2]),
2392 +		.extra1		= &bank_partition_min,
2393 +		.extra2		= &bank_partition_max,
2394 +	},
2395 +	{
2396 +		.procname	= "C1_LB_bank",
2397 +		.mode		= 0666,
2398 +		.proc_handler	= bank_partition_handler,
2399 +		.data		= &bank_partition[3],
2400 +		.maxlen		= sizeof(set_partition[3]),
2401 +		.extra1		= &bank_partition_min,
2402 +		.extra2		= &bank_partition_max,
2403 +	},
2404 +	{
2405 +		.procname	= "C2_LA_bank",
2406 +		.mode		= 0666,
2407 +		.proc_handler	= bank_partition_handler,
2408 +		.data		= &bank_partition[4],
2409 +		.maxlen		= sizeof(set_partition[4]),
2410 +		.extra1		= &bank_partition_min,
2411 +		.extra2		= &bank_partition_max,
2412 +	},	
2413 +	{
2414 +		.procname	= "C2_LB_bank",
2415 +		.mode		= 0666,
2416 +		.proc_handler	= bank_partition_handler,
2417 +		.data		= &bank_partition[5],
2418 +		.maxlen		= sizeof(set_partition[5]),
2419 +		.extra1		= &bank_partition_min,
2420 +		.extra2		= &bank_partition_max,
2421 +	},		
2422 +	{
2423 +		.procname	= "C3_LA_bank",
2424 +		.mode		= 0666,
2425 +		.proc_handler	= bank_partition_handler,
2426 +		.data		= &bank_partition[6],
2427 +		.maxlen		= sizeof(set_partition[6]),
2428 +		.extra1		= &bank_partition_min,
2429 +		.extra2		= &bank_partition_max,
2430 +	},	
2431 +	{
2432 +		.procname	= "C3_LB_bank",
2433 +		.mode		= 0666,
2434 +		.proc_handler	= bank_partition_handler,
2435 +		.data		= &bank_partition[7],
2436 +		.maxlen		= sizeof(set_partition[7]),
2437 +		.extra1		= &bank_partition_min,
2438 +		.extra2		= &bank_partition_max,
2439 +	},	
2440 +	{
2441 +		.procname	= "Call_LC_bank",
2442 +		.mode		= 0666,
2443 +		.proc_handler	= bank_partition_handler,
2444 +		.data		= &bank_partition[8],
2445 +		.maxlen		= sizeof(set_partition[8]),
2446 +		.extra1		= &bank_partition_min,
2447 +		.extra2		= &bank_partition_max,
2448 +	},	
2449 +	{
2450 +		.procname	= "show_page_pool",
2451 +		.mode		= 0666,
2452 +		.proc_handler	= show_page_pool_handler,
2453 +		.data		= &show_page_pool,
2454 +		.maxlen		= sizeof(show_page_pool),
2455 +	},		{
2456 +		.procname	= "refill_page_pool",
2457 +		.mode		= 0666,
2458 +		.proc_handler	= refill_page_pool_handler,
2459 +		.data		= &refill_page_pool,
2460 +		.maxlen		= sizeof(refill_page_pool),
2461 +	},	
2462 +	{ }
2463 +};
2464 +
2465 +static struct ctl_table litmus_dir_table[] = {
2466 +	{
2467 +		.procname	= "litmus",
2468 + 		.mode		= 0555,
2469 +		.child		= cache_table,
2470 +	},
2471 +	{ }
2472 +};
2473 +
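/*
 * Once register_sysctl_table() succeeds in litmus_color_init() below, the
 * tables above surface the masks under /proc/sys/litmus/.  Example usage
 * (the values are illustrative bitmasks, one bit per color or bank; writing
 * a set or bank mask also resets the allocation indices):
 *
 *   echo 15 > /proc/sys/litmus/C0_LA_set        # 0x000f: colors 0-3 for CPU0 Level A
 *   echo 1  > /proc/sys/litmus/C0_LA_bank       # 0x01: bank 0 for CPU0 Level A
 *   echo 1  > /proc/sys/litmus/refill_page_pool # run do_add_pages() and dump counts
 *   echo 1  > /proc/sys/litmus/show_page_pool   # dump per-list page counts
 */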
2474 +
2475 +static struct ctl_table_header *litmus_sysctls;
2476 +
2477 +
2478 +/*
2479 + * Initialize this proc interface.
2480 + */
2481 +static int __init litmus_color_init(void)
2482 +{
2483 +	int err=0;
2484 +        printk("Init bank_proc.c\n");
2485 +
2486 +	init_variables();
2487 +
2488 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
2489 +
2490 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
2491 +	if (!litmus_sysctls) {
2492 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
2493 +		err = -EFAULT;
2494 +		goto out;
2495 +	}
2496 +
2497 +	init_color_groups();			
2498 +	do_add_pages();
2499 +
2500 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
2501 +out:
2502 +	return err;
2503 +}
2504 +
2505 +module_init(litmus_color_init);
2506 +
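/*
 * A minimal in-kernel usage sketch of the bank_proc.c interface (illustrative
 * only; the color value and error handling are placeholders).  A caller
 * obtains a frame from a specific colored pool and hands it back with
 * reclaim_page() when done.  Note that new_alloc_page()'s (page, node, x)
 * signature matches the allocation callback expected by migrate_pages(),
 * which is presumably how the page-coloring syscall path uses it.
 */
struct page *p = get_colored_page(116);	/* bank 7, color 4; see the index example above */
if (p) {
	/* ... map or hand the frame to the requesting real-time task ... */
	reclaim_page(p);			/* return it to its color list */
}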
2507 diff --git litmus/budget.c litmus/budget.c
2508 index 47bf78a..d67f4b3 100644
2509 --- litmus/budget.c
2510 +++ litmus/budget.c
2511 @@ -1,9 +1,11 @@
2512  #include <linux/sched.h>
2513  #include <linux/percpu.h>
2514  #include <linux/hrtimer.h>
2515 +#include <linux/uaccess.h>
2516  
2517  #include <litmus/litmus.h>
2518  #include <litmus/preempt.h>
2519 +#include <litmus/sched_plugin.h>
2520  
2521  #include <litmus/budget.h>
2522  
2523 @@ -113,4 +115,54 @@ static int __init init_budget_enforcement(void)
2524  	return 0;
2525  }
2526  
2527 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining)
2528 +{
2529 +	struct task_struct *t = current;
2530 +	unsigned long flags;
2531 +	s64 delta;
2532 +
2533 +	local_irq_save(flags);
2534 +
2535 +	delta = sched_clock_cpu(smp_processor_id()) - t->se.exec_start;
2536 +	if (delta < 0)
2537 +		delta = 0;
2538 +
2539 +	TRACE_CUR("current_budget: sc:%llu start:%llu lt_t:%llu delta:%lld exec-time:%llu rem:%llu\n",
2540 +		sched_clock_cpu(smp_processor_id()), t->se.exec_start,
2541 +		litmus_clock(), delta,
2542 +		tsk_rt(t)->job_params.exec_time,
2543 +		budget_remaining(t));
2544 +
2545 +	if (used_so_far)
2546 +		*used_so_far = tsk_rt(t)->job_params.exec_time + delta;
2547 +
2548 +	if (remaining) {
2549 +		*remaining = budget_remaining(t);
2550 +		if (*remaining > delta)
2551 +			*remaining -= delta;
2552 +		else
2553 +			*remaining = 0;
2554 +	}
2555 +
2556 +	local_irq_restore(flags);
2557 +}
2558 +
2559 +asmlinkage long sys_get_current_budget(
2560 +	lt_t __user * _expended,
2561 +	lt_t __user *_remaining)
2562 +{
2563 +	lt_t expended = 0, remaining = 0;
2564 +
2565 +	if (is_realtime(current))
2566 +		litmus->current_budget(&expended, &remaining);
2567 +
2568 +	if (_expended && put_user(expended, _expended))
2569 +		return -EFAULT;
2570 +
2571 +	if (_remaining && put_user(remaining, _remaining))
2572 +		return -EFAULT;
2573 +
2574 +	return 0;
2575 +}
2576 +
2577  module_init(init_budget_enforcement);
2578 diff --git litmus/cache_proc.c litmus/cache_proc.c
2579 new file mode 100644
2580 index 0000000..90fb17d
2581 --- /dev/null
2582 +++ litmus/cache_proc.c
2583 @@ -0,0 +1,1338 @@
2584 +#include <asm/uaccess.h>
2585 +#include <linux/uaccess.h>
2586 +#include <linux/init.h>
2587 +#include <linux/types.h>
2588 +#include <linux/kernel.h>
2589 +#include <linux/module.h>
2590 +#include <linux/sysctl.h>
2591 +#include <linux/slab.h>
2592 +#include <linux/io.h>
2593 +#include <linux/mutex.h>
2594 +#include <linux/time.h>
2595 +#include <linux/random.h>
2596 +#include <linux/sched.h>
2597 +
2598 +#include <litmus/rt_param.h>
2599 +#include <litmus/litmus.h>
2600 +#include <litmus/litmus_proc.h>
2601 +#include <litmus/sched_trace.h>
2602 +#include <litmus/cache_proc.h>
2603 +#include <litmus/mc2_common.h>
2604 +
2605 +#include <asm/hardware/cache-l2x0.h>
2606 +#include <asm/cacheflush.h>
2607 +
2608 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
2609 +#define LOCK_ALL        (~UNLOCK_ALL)
2610 +#define MAX_NR_WAYS	16
2611 +#define MAX_NR_COLORS	16
2612 +#define CACHELINE_SIZE 32
2613 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
2614 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
2615 +
2616 +typedef struct cacheline
2617 +{
2618 +        int line[INTS_IN_CACHELINE];
2619 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
2620 +
2621 +void mem_lock(u32 lock_val, int cpu);
2622 +
2623 +/*
2624 + * unlocked_way[i] : allocation can occur in way i
2625 + *
2626 + * 0 = allocation can occur in the corresponding way
2627 + * 1 = allocation cannot occur in the corresponding way
2628 + */
2629 +u32 unlocked_way[MAX_NR_WAYS]  = {
2630 +	0xFFFFFFFE, /* way 0 unlocked */
2631 +	0xFFFFFFFD,
2632 +	0xFFFFFFFB,
2633 +	0xFFFFFFF7,
2634 +	0xFFFFFFEF, /* way 4 unlocked */
2635 +	0xFFFFFFDF,
2636 +	0xFFFFFFBF,
2637 +	0xFFFFFF7F,
2638 +	0xFFFFFEFF, /* way 8 unlocked */
2639 +	0xFFFFFDFF,
2640 +	0xFFFFFBFF,
2641 +	0xFFFFF7FF,
2642 +	0xFFFFEFFF, /* way 12 unlocked */
2643 +	0xFFFFDFFF,
2644 +	0xFFFFBFFF,
2645 +	0xFFFF7FFF,
2646 +};
2647 +
2648 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
2649 +	0x0000FFFF, /* all ways are locked. usable = 0*/
2650 +	0x0000FFFE, /* way ~0 unlocked. usable = 1 */
2651 +	0x0000FFFC,
2652 +	0x0000FFF8,
2653 +	0x0000FFF0,
2654 +	0x0000FFE0,
2655 +	0x0000FFC0,
2656 +	0x0000FF80,
2657 +	0x0000FF00,
2658 +	0x0000FE00,
2659 +	0x0000FC00,
2660 +	0x0000F800,
2661 +	0x0000F000,
2662 +	0x0000E000,
2663 +	0x0000C000,
2664 +	0x00008000,
2665 +	0x00000000, /* way ~15 unlocked. usable = 16 */
2666 +};
2667 +
2668 +u32 way_partition[4] = {
2669 +	0xfffffff0, /* cpu0 */
2670 +	0xffffff0f, /* cpu1 */
2671 +	0xfffff0ff, /* cpu2 */
2672 +	0xffff0fff, /* cpu3 */
2673 +};
2674 +
2675 +u32 way_partitions[9] = {
2676 +	0xffff00ff, /* cpu0 A */
2677 +	0xffff00ff, /* cpu0 B */
2678 +	0xffff00ff, /* cpu1 A */
2679 +	0xffff00ff, /* cpu1 B */
2680 +	0xffff00ff, /* cpu2 A */
2681 +	0xffff00ff, /* cpu2 B */
2682 +	0xffff00ff, /* cpu3 A */
2683 +	0xffff00ff, /* cpu3 B */
2684 +	0xffffff00, /* lv C */
2685 +};
2686 +
2687 +u32 prev_lockdown_d_reg[5] = {
2688 +	0x0000FF00,
2689 +	0x0000FF00,
2690 +	0x0000FF00,
2691 +	0x0000FF00,
2692 +	0x000000FF, /* share with level-C */
2693 +};
2694 +
2695 +u32 prev_lockdown_i_reg[5] = {
2696 +	0x0000FF00,
2697 +	0x0000FF00,
2698 +	0x0000FF00,
2699 +	0x0000FF00,
2700 +	0x000000FF, /* share with level-C */
2701 +};
2702 +
2703 +u32 prev_lbm_i_reg[8] = {
2704 +	0x00000000,
2705 +	0x00000000,
2706 +	0x00000000,
2707 +	0x00000000,
2708 +	0x00000000,
2709 +	0x00000000,
2710 +	0x00000000,
2711 +	0x00000000,
2712 +};
2713 +
2714 +u32 prev_lbm_d_reg[8] = {
2715 +	0x00000000,
2716 +	0x00000000,
2717 +	0x00000000,
2718 +	0x00000000,
2719 +	0x00000000,
2720 +	0x00000000,
2721 +	0x00000000,
2722 +	0x00000000,
2723 +};
2724 +
2725 +static void __iomem *cache_base;
2726 +static void __iomem *lockreg_d;
2727 +static void __iomem *lockreg_i;
2728 +
2729 +static u32 cache_id;
2730 +
2731 +struct mutex actlr_mutex;
2732 +struct mutex l2x0_prefetch_mutex;
2733 +struct mutex lockdown_proc;
2734 +static u32 way_partition_min;
2735 +static u32 way_partition_max;
2736 +
2737 +static int zero = 0;
2738 +static int one = 1;
2739 +
2740 +static int l1_prefetch_proc;
2741 +static int l2_prefetch_hint_proc;
2742 +static int l2_double_linefill_proc;
2743 +static int l2_data_prefetch_proc;
2744 +static int os_isolation;
2745 +static int use_part;
2746 +
2747 +u32 lockdown_reg[9] = {
2748 +	0x00000000,
2749 +	0x00000000,
2750 +	0x00000000,
2751 +	0x00000000,
2752 +	0x00000000,
2753 +	0x00000000,
2754 +	0x00000000,
2755 +	0x00000000,
2756 +};
2757 +	
2758 +
2759 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
2760 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
2761 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2762 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
2763 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
2764 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2765 +
2766 +int lock_all;
2767 +int nr_lockregs;
2768 +static raw_spinlock_t cache_lock;
2769 +static raw_spinlock_t prefetch_lock;
2770 +static void ***flusher_pages = NULL;
2771 +
2772 +extern void l2c310_flush_all(void);
2773 +
2774 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
2775 +{
2776 +	/* wait for cache operation by line or way to complete */
2777 +	while (readl_relaxed(reg) & mask)
2778 +		cpu_relax();
2779 +}
2780 +
2781 +#ifdef CONFIG_CACHE_L2X0 
2782 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
2783 +{
2784 +	/* cache operations by line are atomic on PL310 */
2785 +}
2786 +#else
2787 +#define cache_wait	cache_wait_way
2788 +#endif
2789 +
2790 +static inline void cache_sync(void)
2791 +{
2792 +	void __iomem *base = cache_base;
2793 +
2794 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
2795 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
2796 +}
2797 +
2798 +static void print_lockdown_registers(int cpu)
2799 +{
2800 +	int i;
2801 +	//for (i = 0; i < nr_lockregs; i++) {
2802 +	for (i = 0; i < 4; i++) {
2803 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
2804 +				i, readl_relaxed(ld_d_reg(i)));
2805 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
2806 +				i, readl_relaxed(ld_i_reg(i)));
2807 +	}
2808 +}
2809 +
2810 +static void test_lockdown(void *ignore)
2811 +{
2812 +	int i, cpu;
2813 +
2814 +	cpu = smp_processor_id();
2815 +	printk("Start lockdown test on CPU %d.\n", cpu);
2816 +
2817 +	for (i = 0; i < nr_lockregs; i++) {
2818 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
2819 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
2820 +	}
2821 +
2822 +	printk("Lockdown initial state:\n");
2823 +	print_lockdown_registers(cpu);
2824 +	printk("---\n");
2825 +
2826 +	for (i = 0; i < nr_lockregs; i++) {
2827 +		writel_relaxed(1, ld_d_reg(i));
2828 +		writel_relaxed(2, ld_i_reg(i));
2829 +	}
2830 +	printk("Lockdown all data=1 instr=2:\n");
2831 +	print_lockdown_registers(cpu);
2832 +	printk("---\n");
2833 +
2834 +	for (i = 0; i < nr_lockregs; i++) {
2835 +		writel_relaxed((1 << i), ld_d_reg(i));
2836 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
2837 +	}
2838 +	printk("Lockdown varies:\n");
2839 +	print_lockdown_registers(cpu);
2840 +	printk("---\n");
2841 +
2842 +	for (i = 0; i < nr_lockregs; i++) {
2843 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
2844 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
2845 +	}
2846 +	printk("Lockdown all zero:\n");
2847 +	print_lockdown_registers(cpu);
2848 +
2849 +	printk("End lockdown test.\n");
2850 +}
2851 +
2852 +void litmus_setup_lockdown(void __iomem *base, u32 id)
2853 +{
2854 +	cache_base = base;
2855 +	cache_id = id;
2856 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
2857 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
2858 +    
2859 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
2860 +		nr_lockregs = 8;
2861 +	} else {
2862 +		printk("Unknown cache ID!\n");
2863 +		nr_lockregs = 1;
2864 +	}
2865 +	
2866 +	mutex_init(&actlr_mutex);
2867 +	mutex_init(&l2x0_prefetch_mutex);
2868 +	mutex_init(&lockdown_proc);
2869 +	raw_spin_lock_init(&cache_lock);
2870 +	raw_spin_lock_init(&prefetch_lock);
2871 +	
2872 +	test_lockdown(NULL);
2873 +}
2874 +
2875 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2876 +		size_t *lenp, loff_t *ppos)
2877 +{
2878 +	int ret = 0, i;
2879 +	unsigned long flags;
2880 +	
2881 +	mutex_lock(&lockdown_proc);
2882 +	
2883 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2884 +	if (ret)
2885 +		goto out;
2886 +	
2887 +	if (write) {
2888 +		printk("Way-partition settings:\n");
2889 +		for (i = 0; i < 9; i++) {
2890 +			printk("0x%08X\n", way_partitions[i]);
2891 +		}
2892 +		for (i = 0; i < 4; i++) {
2893 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2894 +				       i * L2X0_LOCKDOWN_STRIDE);
2895 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2896 +				       i * L2X0_LOCKDOWN_STRIDE);
2897 +		}
2898 +	}
2899 +	
2900 +	local_irq_save(flags);
2901 +	print_lockdown_registers(smp_processor_id());
2902 +	l2c310_flush_all();
2903 +	local_irq_restore(flags);
2904 +out:
2905 +	mutex_unlock(&lockdown_proc);
2906 +	return ret;
2907 +}
2908 +
2909 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
2910 +		size_t *lenp, loff_t *ppos)
2911 +{
2912 +	int ret = 0, i;
2913 +	unsigned long flags;
2914 +	
2915 +	mutex_lock(&lockdown_proc);
2916 +	
2917 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2918 +	if (ret)
2919 +		goto out;
2920 +	
2921 +	if (write && lock_all == 1) {
2922 +		for (i = 0; i < nr_lockregs; i++) {
2923 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2924 +				       i * L2X0_LOCKDOWN_STRIDE);
2925 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2926 +				       i * L2X0_LOCKDOWN_STRIDE);
2927 +		}
2928 +/*		
2929 +		for (i = 0; i < nr_lockregs;  i++) {
2930 +			barrier();
2931 +			mem_lock(LOCK_ALL, i);
2932 +			barrier();
2933 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
2934 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
2935 +		}
2936 +*/		
2937 +	}
2938 +	if (write && lock_all == 0) {
2939 +		for (i = 0; i < nr_lockregs; i++) {
2940 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2941 +				       i * L2X0_LOCKDOWN_STRIDE);
2942 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2943 +				       i * L2X0_LOCKDOWN_STRIDE);
2944 +		}
2945 +
2946 +	}
2947 +
2948 +	local_irq_save(flags);
2949 +	print_lockdown_registers(smp_processor_id());
2950 +	l2c310_flush_all();
2951 +	local_irq_restore(flags);
2952 +out:
2953 +	mutex_unlock(&lockdown_proc);
2954 +	return ret;
2955 +}
2956 +
2957 +void cache_lockdown(u32 lock_val, int cpu)
2958 +{
2959 +	__asm__ __volatile__ (
2960 +"	str	%[lockval], [%[dcachereg]]\n"
2961 +"	str	%[lockval], [%[icachereg]]\n"
2962 +	: 
2963 +	: [dcachereg] "r" (ld_d_reg(cpu)),
2964 +	  [icachereg] "r" (ld_i_reg(cpu)),
2965 +	  [lockval] "r" (lock_val)
2966 +	: "cc");
2967 +}
2968 +
2969 +void do_partition(enum crit_level lv, int cpu)
2970 +{
2971 +	u32 regs;
2972 +	unsigned long flags;
2973 +	
2974 +	if (lock_all || !use_part)
2975 +		return;
2976 +	raw_spin_lock_irqsave(&cache_lock, flags);
2977 +	switch(lv) {
2978 +		case CRIT_LEVEL_A:
2979 +			regs = ~way_partitions[cpu*2];
2980 +			regs &= 0x0000ffff;
2981 +			break;
2982 +		case CRIT_LEVEL_B:
2983 +			regs = ~way_partitions[cpu*2+1];
2984 +			regs &= 0x0000ffff;
2985 +			break;
2986 +		case CRIT_LEVEL_C:
2987 +		case NUM_CRIT_LEVELS:
2988 +			regs = ~way_partitions[8];
2989 +			regs &= 0x0000ffff;
2990 +			break;
2991 +		default:
2992 +			BUG();
2993 +
2994 +	}
2995 +	barrier();
2996 +
2997 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2998 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2999 +	barrier();
3000 +
3001 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
3002 +}
3003 +
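/*
 * Worked example of the mask selection above, using the default
 * way_partitions[]: for CRIT_LEVEL_A on cpu 0,
 *   regs = ~way_partitions[0] & 0x0000ffff = ~0xffff00ff & 0x0000ffff = 0x0000ff00
 * so ways 8-15 are locked for that cpu and ways 0-7 remain usable, while the
 * Level-C entry (way_partitions[8] = 0xffffff00) yields 0x000000ff, locking
 * ways 0-7 and leaving ways 8-15 to Level C.  A set bit in a per-cpu
 * lockdown register prevents allocation in the corresponding way.
 */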
3004 +void lock_cache(int cpu, u32 val)
3005 +{
3006 +	unsigned long flags;
3007 +	
3008 +	local_irq_save(flags);
3009 +	if (val != 0xffffffff) {
3010 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
3011 +					   cpu * L2X0_LOCKDOWN_STRIDE);
3012 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
3013 +					   cpu * L2X0_LOCKDOWN_STRIDE);
3014 +	}
3015 +	else {
3016 +		int i;
3017 +		for (i = 0; i < 4; i++)
3018 +			do_partition(CRIT_LEVEL_A, i);
3019 +	}
3020 +	local_irq_restore(flags);
3021 +}
3022 +
3023 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
3024 +		size_t *lenp, loff_t *ppos)
3025 +{
3026 +	int ret = 0;
3027 +	
3028 +	mutex_lock(&lockdown_proc);
3029 +
3030 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3031 +	if (ret)
3032 +		goto out;
3033 +	
3034 +
3035 +	printk("USE_PART HANDLER = %d\n", use_part);
3036 +
3037 +out:
3038 +	mutex_unlock(&lockdown_proc);
3039 +	return ret;
3040 +}
3041 +
3042 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
3043 +		size_t *lenp, loff_t *ppos)
3044 +{
3045 +	int ret = 0;
3046 +	
3047 +	mutex_lock(&lockdown_proc);
3048 +	
3049 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3050 +	if (ret)
3051 +		goto out;
3052 +	
3053 +
3054 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
3055 +
3056 +out:
3057 +	mutex_unlock(&lockdown_proc);
3058 +	return ret;
3059 +}
3060 +
3061 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
3062 +		size_t *lenp, loff_t *ppos)
3063 +{
3064 +	int ret = 0, i;
3065 +	
3066 +	mutex_lock(&lockdown_proc);
3067 +	
3068 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3069 +	if (ret)
3070 +		goto out;
3071 +	
3072 +	if (write) {
3073 +		for (i = 0; i < nr_lockregs; i++) {
3074 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
3075 +				       i * L2X0_LOCKDOWN_STRIDE);
3076 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
3077 +				       i * L2X0_LOCKDOWN_STRIDE);
3078 +		}
3079 +	}
3080 +
3081 +out:
3082 +	mutex_unlock(&lockdown_proc);
3083 +	return ret;
3084 +}
3085 +
3086 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
3087 +		size_t *lenp, loff_t *ppos)
3088 +{
3089 +	int ret = 0, i;
3090 +	
3091 +	mutex_lock(&lockdown_proc);
3092 +	
3093 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3094 +	if (ret)
3095 +		goto out;
3096 +	
3097 +	if (write) {
3098 +		for (i = 0; i < nr_lockregs; i++) {
3099 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
3100 +				       i *