Attachment 'mc2_litmus-rt_2015.1-rtns18.patch'

   1 diff --git arch/arm/boot/compressed/Makefile arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- arch/arm/boot/compressed/Makefile
   4 +++ arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git arch/arm/include/asm/unistd.h arch/arm/include/asm/unistd.h
  14 index 0c462a9..7197bbe 100644
  15 --- arch/arm/include/asm/unistd.h
  16 +++ arch/arm/include/asm/unistd.h
  17 @@ -19,7 +19,8 @@
  18   * This may need to be greater than __NR_last_syscall+1 in order to
  19   * account for the padding in the syscall table
  20   */
  21 -#define __NR_syscalls  (388 + NR_litmus_syscalls)
  22 +#define __NR_syscalls  (388 + NR_litmus_syscalls + 3)
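     +/* The "+ 3" rounds 388 + NR_litmus_syscalls (= 409) up to 412, a multiple
     + * of four, matching the table padding computed via syscalls_padding in
     + * arch/arm/kernel/calls.S. */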
  23 +
  24  
  25  /*
  26   * *NOTE*: This is a ghost syscall private to the kernel.  Only the
  27 diff --git arch/arm/kernel/calls.S arch/arm/kernel/calls.S
  28 index f4738a8..04cc83b 100644
  29 --- arch/arm/kernel/calls.S
  30 +++ arch/arm/kernel/calls.S
  31 @@ -409,6 +409,15 @@
  32          CALL(sys_wait_for_ts_release)
  33  		CALL(sys_release_ts)
  34  		CALL(sys_null_call)
  35 +/* 400 */	CALL(sys_get_current_budget)
  36 +		CALL(sys_reservation_create)
  37 +		CALL(sys_reservation_destroy)
  38 +		CALL(sys_set_mc2_task_param)
  39 +		CALL(sys_set_page_color)
  40 +/* 405 */	CALL(sys_test_call)
  41 +		CALL(sys_run_test)
  42 +		CALL(sys_lock_buffer)
  43 +		CALL(sys_recolor_mem)
  44  
  45  #ifndef syscalls_counted
  46  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
  47 diff --git arch/arm/kernel/irq.c arch/arm/kernel/irq.c
  48 index 350f188..a9ba6e5 100644
  49 --- arch/arm/kernel/irq.c
  50 +++ arch/arm/kernel/irq.c
  51 @@ -44,6 +44,9 @@
  52  #include <asm/mach/irq.h>
  53  #include <asm/mach/time.h>
  54  
  55 +#include <litmus/cache_proc.h>
  56 +#include <litmus/litmus.h>
  57 +
  58  unsigned long irq_err_count;
  59  
  60  int arch_show_interrupts(struct seq_file *p, int prec)
  61 @@ -66,7 +69,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  62   */
  63  void handle_IRQ(unsigned int irq, struct pt_regs *regs)
  64  {
  65 +	enter_irq_mode();
  66  	__handle_domain_irq(NULL, irq, false, regs);
  67 +	exit_irq_mode();
  68  }
  69  
  70  /*
  71 diff --git arch/arm/mm/cache-l2x0.c arch/arm/mm/cache-l2x0.c
  72 index e309c8f..969da4a 100644
  73 --- arch/arm/mm/cache-l2x0.c
  74 +++ arch/arm/mm/cache-l2x0.c
  75 @@ -33,6 +33,8 @@
  76  #include "cache-tauros3.h"
  77  #include "cache-aurora-l2.h"
  78  
  79 +#include <litmus/cache_proc.h>
  80 +
  81  struct l2c_init_data {
  82  	const char *type;
  83  	unsigned way_size_0;
  84 @@ -651,6 +653,11 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
  85  	 */
  86  	aux |= L310_AUX_CTRL_NS_LOCKDOWN;
  87  
  88 +	/*
  89 +	 * Always enable non-secure interrupt access control registers
  90 +	 */
  91 +	aux |= L220_AUX_CTRL_NS_INT_CTRL;
  92 +
  93  	l2c_enable(base, aux, num_lock);
  94  
  95  	/* Read back resulting AUX_CTRL value as it could have been altered. */
  96 @@ -726,7 +733,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  97  
  98  	if (n) {
  99  		unsigned i;
 100 -
 101  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
 102  		for (i = 0; i < n; i++)
 103  			pr_cont(" %s", errata[i]);
 104 @@ -774,6 +780,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
 105  	},
 106  };
 107  
 108 +void l2c310_flush_all(void)
 109 +{
 110 +	l2c210_flush_all();
 111 +}
 112 +
 113  static int __init __l2c_init(const struct l2c_init_data *data,
 114  			     u32 aux_val, u32 aux_mask, u32 cache_id)
 115  {
 116 @@ -876,6 +887,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 117  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
 118  		data->type, cache_id, aux);
 119  
 120 +	litmus_setup_lockdown(l2x0_base, cache_id);
 121 +
 122  	return 0;
 123  }
 124  
 125 diff --git arch/x86/syscalls/syscall_32.tbl arch/x86/syscalls/syscall_32.tbl
 126 index 34680a5..6b80d98 100644
 127 --- arch/x86/syscalls/syscall_32.tbl
 128 +++ arch/x86/syscalls/syscall_32.tbl
 129 @@ -377,3 +377,12 @@
 130  368	i386	wait_for_ts_release	sys_wait_for_ts_release
 131  369	i386	release_ts		sys_release_ts
 132  370	i386	null_call		sys_null_call
 133 +371	i386	get_current_budget	sys_get_current_budget
 134 +372	i386	reservation_create	sys_reservation_create
 135 +373	i386	reservation_destroy	sys_reservation_destroy
 136 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
 137 +375	i386	set_page_color		sys_set_page_color
 138 +376	i386	test_call		sys_test_call
 139 +377	i386	run_test		sys_run_test
 140 +378	i386	lock_buffer		sys_lock_buffer
 141 +379	i386	recolor_mem		sys_recolor_mem
 142 diff --git arch/x86/syscalls/syscall_64.tbl arch/x86/syscalls/syscall_64.tbl
 143 index cbd1b6b..05aa913 100644
 144 --- arch/x86/syscalls/syscall_64.tbl
 145 +++ arch/x86/syscalls/syscall_64.tbl
 146 @@ -342,6 +342,15 @@
 147  360	common	wait_for_ts_release	sys_wait_for_ts_release
 148  361	common	release_ts		sys_release_ts
 149  362	common	null_call		sys_null_call
 150 +363	common	get_current_budget	sys_get_current_budget
 151 +364	common	reservation_create	sys_reservation_create
 152 +365	common	reservation_destroy	sys_reservation_destroy
 153 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 154 +367	common	set_page_color		sys_set_page_color
 155 +368	common	test_call		sys_test_call
 156 +369	common	run_test		sys_run_test
 157 +370	common	lock_buffer		sys_lock_buffer
 158 +371	common	recolor_mem		sys_recolor_mem
 159  
 160  #
 161  # x32-specific system call numbers start at 512 to avoid cache impact
 162 diff --git drivers/media/usb/uvc/uvc_video.c drivers/media/usb/uvc/uvc_video.c
 163 index 20ccc9d..9dd652c 100644
 164 --- drivers/media/usb/uvc/uvc_video.c
 165 +++ drivers/media/usb/uvc/uvc_video.c
 166 @@ -26,6 +26,16 @@
 167  
 168  #include "uvcvideo.h"
 169  
 170 +#include <litmus/litmus.h>
 171 +#include <litmus/trace.h>
 172 +
 173 +/* Default behavior: UVC buffers are allocated from the Level-C/OS partition */
 174 +#ifdef CONFIG_UVC_ALLOC_LEVEL_AB
 175 +#define UVC_FLAG	(GFP_COLOR)
 176 +#else
 177 +#define UVC_FLAG	(0)
 178 +#endif
 179 +
 180  /* ------------------------------------------------------------------------
 181   * UVC Controls
 182   */
 183 @@ -167,7 +177,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
 184  			query == UVC_GET_DEF)
 185  		return -EIO;
 186  
 187 -	data = kmalloc(size, GFP_KERNEL);
 188 +	data = kmalloc(size, GFP_KERNEL|UVC_FLAG);
 189  	if (data == NULL)
 190  		return -ENOMEM;
 191  
 192 @@ -251,7 +261,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
 193  	int ret;
 194  
 195  	size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
 196 -	data = kzalloc(size, GFP_KERNEL);
 197 +	data = kzalloc(size, GFP_KERNEL|UVC_FLAG);
 198  	if (data == NULL)
 199  		return -ENOMEM;
 200  
 201 @@ -494,7 +504,7 @@ static int uvc_video_clock_init(struct uvc_streaming *stream)
 202  	clock->size = 32;
 203  
 204  	clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
 205 -				 GFP_KERNEL);
 206 +				 GFP_KERNEL|UVC_FLAG);
 207  	if (clock->samples == NULL)
 208  		return -ENOMEM;
 209  
 210 @@ -1341,9 +1351,9 @@ static void uvc_video_complete(struct urb *urb)
 211  				       queue);
 212  	spin_unlock_irqrestore(&queue->irqlock, flags);
 213  
 214 -	stream->decode(urb, stream, buf);
 215 +	stream->decode(urb, stream, buf); //uvc_video_decode_isoc()
 216  
 217 -	if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
 218 +	if ((ret = usb_submit_urb(urb, GFP_ATOMIC|UVC_FLAG)) < 0) {
 219  		uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
 220  			ret);
 221  	}
 222 @@ -1406,10 +1416,10 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
 223  #ifndef CONFIG_DMA_NONCOHERENT
 224  			stream->urb_buffer[i] = usb_alloc_coherent(
 225  				stream->dev->udev, stream->urb_size,
 226 -				gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
 227 +				gfp_flags | __GFP_NOWARN | UVC_FLAG, &stream->urb_dma[i]);
 228  #else
 229  			stream->urb_buffer[i] =
 230 -			    kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
 231 +			    kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN | UVC_FLAG);
 232  #endif
 233  			if (!stream->urb_buffer[i]) {
 234  				uvc_free_urb_buffers(stream);
 235 @@ -1492,14 +1502,14 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
 236  	psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
 237  	size = stream->ctrl.dwMaxVideoFrameSize;
 238  
 239 -	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
 240 +	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags|UVC_FLAG);
 241  	if (npackets == 0)
 242  		return -ENOMEM;
 243  
 244  	size = npackets * psize;
 245  
 246  	for (i = 0; i < UVC_URBS; ++i) {
 247 -		urb = usb_alloc_urb(npackets, gfp_flags);
 248 +		urb = usb_alloc_urb(npackets, gfp_flags|UVC_FLAG);
 249  		if (urb == NULL) {
 250  			uvc_uninit_video(stream, 1);
 251  			return -ENOMEM;
 252 @@ -1548,7 +1558,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
 253  	size = stream->ctrl.dwMaxPayloadTransferSize;
 254  	stream->bulk.max_payload_size = size;
 255  
 256 -	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
 257 +	npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags|UVC_FLAG);
 258  	if (npackets == 0)
 259  		return -ENOMEM;
 260  
 261 @@ -1565,7 +1575,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
 262  		size = 0;
 263  
 264  	for (i = 0; i < UVC_URBS; ++i) {
 265 -		urb = usb_alloc_urb(0, gfp_flags);
 266 +		urb = usb_alloc_urb(0, gfp_flags|UVC_FLAG);
 267  		if (urb == NULL) {
 268  			uvc_uninit_video(stream, 1);
 269  			return -ENOMEM;
 270 @@ -1649,12 +1659,11 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 271  
 272  		uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
 273  			"(%u B/frame bandwidth).\n", altsetting, best_psize);
 274 -
 275  		ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
 276  		if (ret < 0)
 277  			return ret;
 278  
 279 -		ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
 280 +		ret = uvc_init_video_isoc(stream, best_ep, gfp_flags|UVC_FLAG);
 281  	} else {
 282  		/* Bulk endpoint, proceed to URB initialization. */
 283  		ep = uvc_find_endpoint(&intf->altsetting[0],
 284 @@ -1662,7 +1671,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 285  		if (ep == NULL)
 286  			return -EIO;
 287  
 288 -		ret = uvc_init_video_bulk(stream, ep, gfp_flags);
 289 +		ret = uvc_init_video_bulk(stream, ep, gfp_flags|UVC_FLAG);
 290  	}
 291  
 292  	if (ret < 0)
 293 @@ -1670,7 +1679,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 294  
 295  	/* Submit the URBs. */
 296  	for (i = 0; i < UVC_URBS; ++i) {
 297 -		ret = usb_submit_urb(stream->urb[i], gfp_flags);
 298 +		ret = usb_submit_urb(stream->urb[i], gfp_flags|UVC_FLAG);
 299  		if (ret < 0) {
 300  			uvc_printk(KERN_ERR, "Failed to submit URB %u "
 301  					"(%d).\n", i, ret);
 302 @@ -1741,7 +1750,7 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
 303  	if (ret < 0)
 304  		return ret;
 305  
 306 -	return uvc_init_video(stream, GFP_NOIO);
 307 +	return uvc_init_video(stream, GFP_NOIO|UVC_FLAG);
 308  }
 309  
 310  /* ------------------------------------------------------------------------
 311 @@ -1892,7 +1901,7 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
 312  	if (ret < 0)
 313  		goto error_commit;
 314  
 315 -	ret = uvc_init_video(stream, GFP_KERNEL);
 316 +	ret = uvc_init_video(stream, GFP_KERNEL|UVC_FLAG);
 317  	if (ret < 0)
 318  		goto error_video;
 319  
 320 diff --git drivers/media/v4l2-core/videobuf2-core.c drivers/media/v4l2-core/videobuf2-core.c
 321 index 66ada01..0a5b616 100644
 322 --- drivers/media/v4l2-core/videobuf2-core.c
 323 +++ drivers/media/v4l2-core/videobuf2-core.c
 324 @@ -30,6 +30,12 @@
 325  #include <media/v4l2-common.h>
 326  #include <media/videobuf2-core.h>
 327  
 328 +#ifdef CONFIG_UVC_ALLOC_LEVEL_AB
 329 +#define VB2_CORE_FLAG	(GFP_COLOR)
 330 +#else
 331 +#define VB2_CORE_FLAG	(0)
 332 +#endif
 333 +
 334  static int debug;
 335  module_param(debug, int, 0644);
 336  
 337 @@ -200,9 +206,8 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 338  	 */
 339  	for (plane = 0; plane < vb->num_planes; ++plane) {
 340  		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
 341 -
 342  		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
 343 -				      size, dma_dir, q->gfp_flags);
 344 +				      size, dma_dir, q->gfp_flags|VB2_CORE_FLAG);
 345  		if (IS_ERR_OR_NULL(mem_priv))
 346  			goto free;
 347  
 348 @@ -352,7 +357,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
 349  
 350  	for (buffer = 0; buffer < num_buffers; ++buffer) {
 351  		/* Allocate videobuf buffer structures */
 352 -		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
 353 +		vb = kzalloc(q->buf_struct_size, GFP_KERNEL|VB2_CORE_FLAG);
 354  		if (!vb) {
 355  			dprintk(1, "memory alloc for buffer struct failed\n");
 356  			break;
 357 @@ -2830,7 +2835,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
 358  		(read) ? "read" : "write", count, q->fileio_read_once,
 359  		q->fileio_write_immediately);
 360  
 361 -	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
 362 +	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL|VB2_CORE_FLAG);
 363  	if (fileio == NULL)
 364  		return -ENOMEM;
 365  
 366 @@ -3223,7 +3228,7 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
 367  	if (WARN_ON(q->fileio))
 368  		return -EBUSY;
 369  
 370 -	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
 371 +	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL|VB2_CORE_FLAG);
 372  	if (threadio == NULL)
 373  		return -ENOMEM;
 374  	threadio->fnc = fnc;
 375 diff --git drivers/media/v4l2-core/videobuf2-vmalloc.c drivers/media/v4l2-core/videobuf2-vmalloc.c
 376 index 657ab30..dab3772 100644
 377 --- drivers/media/v4l2-core/videobuf2-vmalloc.c
 378 +++ drivers/media/v4l2-core/videobuf2-vmalloc.c
 379 @@ -21,6 +21,12 @@
 380  #include <media/videobuf2-vmalloc.h>
 381  #include <media/videobuf2-memops.h>
 382  
 383 +#ifdef CONFIG_UVC_ALLOC_LEVEL_AB
 384 +#define VB2_FLAG	(GFP_COLOR)
 385 +#else
 386 +#define VB2_FLAG	(0)
 387 +#endif
 388 +
 389  struct vb2_vmalloc_buf {
 390  	void				*vaddr;
 391  	struct page			**pages;
 392 @@ -40,12 +46,16 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
 393  {
 394  	struct vb2_vmalloc_buf *buf;
 395  
 396 -	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
 397 +	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags | VB2_FLAG);
 398  	if (!buf)
 399  		return NULL;
 400  
 401  	buf->size = size;
 402 +#ifdef CONFIG_UVC_ALLOC_LEVEL_AB
 403 +	buf->vaddr = vmalloc_color_user(buf->size);
 404 +#else
 405  	buf->vaddr = vmalloc_user(buf->size);
 406 +#endif
 407  	buf->dma_dir = dma_dir;
 408  	buf->handler.refcount = &buf->refcount;
 409  	buf->handler.put = vb2_vmalloc_put;
 410 @@ -81,7 +91,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 411  	struct vm_area_struct *vma;
 412  	dma_addr_t physp;
 413  
 414 -	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 415 +	buf = kzalloc(sizeof(*buf), GFP_KERNEL | VB2_FLAG);
 416  	if (!buf)
 417  		return NULL;
 418  
 419 @@ -103,7 +113,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 420  		last  = (vaddr + size - 1) >> PAGE_SHIFT;
 421  		buf->n_pages = last - first + 1;
 422  		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
 423 -				     GFP_KERNEL);
 424 +				     GFP_KERNEL | VB2_FLAG);
 425  		if (!buf->pages)
 426  			goto fail_pages_array_alloc;
 427  
 428 @@ -233,12 +243,12 @@ static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *de
 429  	int ret;
 430  	int i;
 431  
 432 -	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 433 +	attach = kzalloc(sizeof(*attach), GFP_KERNEL | VB2_FLAG);
 434  	if (!attach)
 435  		return -ENOMEM;
 436  
 437  	sgt = &attach->sgt;
 438 -	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
 439 +	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL | VB2_FLAG);
 440  	if (ret) {
 441  		kfree(attach);
 442  		return ret;
 443 @@ -429,7 +439,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 444  	if (dbuf->size < size)
 445  		return ERR_PTR(-EFAULT);
 446  
 447 -	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 448 +	buf = kzalloc(sizeof(*buf), GFP_KERNEL | VB2_FLAG);
 449  	if (!buf)
 450  		return ERR_PTR(-ENOMEM);
 451  
 452 diff --git drivers/net/ethernet/freescale/fec_main.c drivers/net/ethernet/freescale/fec_main.c
 453 index 66d47e4..7dd6f26 100644
 454 --- drivers/net/ethernet/freescale/fec_main.c
 455 +++ drivers/net/ethernet/freescale/fec_main.c
 456 @@ -60,6 +60,7 @@
 457  #include <linux/prefetch.h>
 458  
 459  #include <asm/cacheflush.h>
 460 +#include <litmus/cache_proc.h>
 461  
 462  #include "fec.h"
 463  
 464 @@ -1587,6 +1588,8 @@ fec_enet_interrupt(int irq, void *dev_id)
 465  	writel(int_events, fep->hwp + FEC_IEVENT);
 466  	fec_enet_collect_events(fep, int_events);
 467  
 468 +	enter_irq_mode();
 469 +
 470  	if ((fep->work_tx || fep->work_rx) && fep->link) {
 471  		ret = IRQ_HANDLED;
 472  
 473 @@ -1605,6 +1608,7 @@ fec_enet_interrupt(int irq, void *dev_id)
 474  	if (fep->ptp_clock)
 475  		fec_ptp_check_pps_event(fep);
 476  
 477 +	exit_irq_mode();
 478  	return ret;
 479  }
 480  
 481 diff --git include/linux/gfp.h include/linux/gfp.h
 482 index 15928f0..92643b8 100644
 483 --- include/linux/gfp.h
 484 +++ include/linux/gfp.h
 485 @@ -35,6 +35,7 @@ struct vm_area_struct;
 486  #define ___GFP_NO_KSWAPD	0x400000u
 487  #define ___GFP_OTHER_NODE	0x800000u
 488  #define ___GFP_WRITE		0x1000000u
 489 +#define ___GFP_COLOR		0x2000000u
 490  /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 491  
 492  /*
 493 @@ -94,6 +95,7 @@ struct vm_area_struct;
 494  #define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 495  #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 496  #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
 497 +#define __GFP_COLOR ((__force gfp_t)___GFP_COLOR)	/* Colored page request */
 498  
 499  /*
 500   * This may seem redundant, but it's a way of annotating false positives vs.
 501 @@ -101,7 +103,7 @@ struct vm_area_struct;
 502   */
 503  #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 504  
 505 -#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
 506 +#define __GFP_BITS_SHIFT 26	/* Room for N __GFP_FOO bits */
 507  #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 508  
 509  /* This equals 0, but use constants in case they ever change */
 510 @@ -127,7 +129,7 @@ struct vm_area_struct;
 511  /* Control page allocator reclaim behavior */
 512  #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
 513  			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
 514 -			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 515 +			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|__GFP_COLOR)
 516  
 517  /* Control slab gfp mask during early boot */
 518  #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
 519 @@ -146,6 +148,9 @@ struct vm_area_struct;
 520  /* 4GB DMA on some platforms */
 521  #define GFP_DMA32	__GFP_DMA32
 522  
 523 +/* Colored page requests */
 524 +#define GFP_COLOR	__GFP_COLOR
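     +/* Usage, as elsewhere in this patch: kmalloc(size, GFP_KERNEL | GFP_COLOR)
     + * marks the allocation as a colored (bank/set-partitioned) page request. */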
 525 +
 526  /* Convert GFP flags to their corresponding migrate type */
 527  static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 528  {
 529 diff --git include/linux/migrate.h include/linux/migrate.h
 530 index cac1c09..b16047b 100644
 531 --- include/linux/migrate.h
 532 +++ include/linux/migrate.h
 533 @@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
 534  			struct page *, struct page *, enum migrate_mode);
 535  extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 536  		unsigned long private, enum migrate_mode mode, int reason);
 537 +extern int replicate_pages(struct list_head *l, new_page_t new, free_page_t free,
 538 +		unsigned long private, enum migrate_mode mode, int reason);
 539  
 540  extern int migrate_prep(void);
 541  extern int migrate_prep_local(void);
 542 @@ -50,7 +52,11 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 543  		free_page_t free, unsigned long private, enum migrate_mode mode,
 544  		int reason)
 545  	{ return -ENOSYS; }
 546 -
 547 +static inline int replicate_pages(struct list_head *l, new_page_t new,
 548 +		free_page_t free, unsigned long private, enum migrate_mode mode,
 549 +		int reason)
 550 +	{ return -ENOSYS; }
 551 +
 552  static inline int migrate_prep(void) { return -ENOSYS; }
 553  static inline int migrate_prep_local(void) { return -ENOSYS; }
 554  
 555 diff --git include/linux/mmzone.h include/linux/mmzone.h
 556 index 54d74f6..3d3f9ac 100644
 557 --- include/linux/mmzone.h
 558 +++ include/linux/mmzone.h
 559 @@ -35,6 +35,16 @@
 560   */
 561  #define PAGE_ALLOC_COSTLY_ORDER 3
 562  
 563 +/* For page coloring. This address decoding is used on the imx6-sabresd
 564 + * platform without bank interleaving.
 565 + */
 566 +#define BANK_MASK				0x38000000
 567 +#define BANK_SHIFT				27
 568 +#define CACHE_MASK				0x0000f000
 569 +#define CACHE_SHIFT				12
 570 +#define MAX_PARTITIONED_ORDER	11
 571 +#define MAX_CONTIG_ORDER		11
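     +/* Worked example: for physical address 0x12345678,
     + *   bank  = (0x12345678 & BANK_MASK)  >> BANK_SHIFT  = 2,
     + *   color = (0x12345678 & CACHE_MASK) >> CACHE_SHIFT = 5;
     + * the 3 bank bits give 8 DRAM banks, the 4 color bits 16 cache colors. */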
 572 +
 573  enum {
 574  	MIGRATE_UNMOVABLE,
 575  	MIGRATE_RECLAIMABLE,
 576 @@ -157,6 +167,7 @@ enum zone_stat_item {
 577  	WORKINGSET_NODERECLAIM,
 578  	NR_ANON_TRANSPARENT_HUGEPAGES,
 579  	NR_FREE_CMA_PAGES,
 580 +	NR_FREE_HC_PAGES,
 581  	NR_VM_ZONE_STAT_ITEMS };
 582  
 583  /*
 584 @@ -476,7 +487,8 @@ struct zone {
 585  	ZONE_PADDING(_pad1_)
 586  	/* free areas of different sizes */
 587  	struct free_area	free_area[MAX_ORDER];
 588 -
 589 +	struct free_area	free_area_d[NR_CPUS][MAX_PARTITIONED_ORDER];
 590 +
 591  	/* zone flags, see below */
 592  	unsigned long		flags;
 593  
 594 @@ -523,7 +535,7 @@ struct zone {
 595  	/* Set to true when the PG_migrate_skip bits should be cleared */
 596  	bool			compact_blockskip_flush;
 597  #endif
 598 -
 599 +
 600  	ZONE_PADDING(_pad3_)
 601  	/* Zone statistics */
 602  	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 603 diff --git include/linux/msg.h include/linux/msg.h
 604 index f3f302f..9bf01c9 100644
 605 --- include/linux/msg.h
 606 +++ include/linux/msg.h
 607 @@ -12,6 +12,8 @@ struct msg_msg {
 608  	struct msg_msgseg *next;
 609  	void *security;
 610  	/* the actual message follows immediately */
 611 +	dma_addr_t	handle;
 612 +	size_t alloc_len;
 613  };
 614  
 615  /* one msq_queue structure for each present queue on the system */
 616 diff --git include/linux/rmap.h include/linux/rmap.h
 617 index c89c53a..7c90e02 100644
 618 --- include/linux/rmap.h
 619 +++ include/linux/rmap.h
 620 @@ -188,7 +188,8 @@ int page_referenced(struct page *, int is_locked,
 621  #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 622  
 623  int try_to_unmap(struct page *, enum ttu_flags flags);
 624 -
 625 +int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
 626 +		     unsigned long address, void *arg);
 627  /*
 628   * Used by uprobes to replace a userspace page safely
 629   */
 630 diff --git include/linux/slab.h include/linux/slab.h
 631 index ffd24c8..a899dda 100644
 632 --- include/linux/slab.h
 633 +++ include/linux/slab.h
 634 @@ -87,6 +87,8 @@
 635  # define SLAB_FAILSLAB		0x00000000UL
 636  #endif
 637  
 638 +#define SLAB_NO_MERGE		0x04000000UL	/* Do not merge with existing slab */
 639 +
 640  /* The following flags affect the page allocator grouping pages by mobility */
 641  #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 642  #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 643 diff --git include/linux/slub_def.h include/linux/slub_def.h
 644 index 3388511..9400aa1 100644
 645 --- include/linux/slub_def.h
 646 +++ include/linux/slub_def.h
 647 @@ -98,6 +98,8 @@ struct kmem_cache {
 648  	 */
 649  	int remote_node_defrag_ratio;
 650  #endif
 651 +	/* cpu id for higher-criticality slabs */
 652 +	int cpu_id;
 653  	struct kmem_cache_node *node[MAX_NUMNODES];
 654  };
 655  
 656 diff --git include/linux/vm_event_item.h include/linux/vm_event_item.h
 657 index 9246d32..3f5a9da 100644
 658 --- include/linux/vm_event_item.h
 659 +++ include/linux/vm_event_item.h
 660 @@ -23,7 +23,7 @@
 661  
 662  enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 663  		FOR_ALL_ZONES(PGALLOC),
 664 -		PGFREE, PGACTIVATE, PGDEACTIVATE,
 665 +		PGFREE, PGFREE_HC, PGACTIVATE, PGDEACTIVATE,
 666  		PGFAULT, PGMAJFAULT,
 667  		FOR_ALL_ZONES(PGREFILL),
 668  		FOR_ALL_ZONES(PGSTEAL_KSWAPD),
 669 diff --git include/linux/vmalloc.h include/linux/vmalloc.h
 670 index 0ec5983..c647658 100644
 671 --- include/linux/vmalloc.h
 672 +++ include/linux/vmalloc.h
 673 @@ -67,8 +67,10 @@ static inline void vmalloc_init(void)
 674  #endif
 675  
 676  extern void *vmalloc(unsigned long size);
 677 +extern void *vmalloc_color(unsigned long size);
 678  extern void *vzalloc(unsigned long size);
 679  extern void *vmalloc_user(unsigned long size);
 680 +extern void *vmalloc_color_user(unsigned long size);
 681  extern void *vmalloc_node(unsigned long size, int node);
 682  extern void *vzalloc_node(unsigned long size, int node);
 683  extern void *vmalloc_exec(unsigned long size);
 684 diff --git include/linux/vmstat.h include/linux/vmstat.h
 685 index 82e7db7..b6410f7 100644
 686 --- include/linux/vmstat.h
 687 +++ include/linux/vmstat.h
 688 @@ -278,9 +278,12 @@ static inline void drain_zonestat(struct zone *zone,
 689  #endif		/* CONFIG_SMP */
 690  
 691  static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
 692 -					     int migratetype)
 693 +					     int migratetype, int part_no)
 694  {
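     +	/* part_no == NR_CPUS denotes the shared (non-partitioned) pool;
     +	 * any other value is a per-CPU partition, accounted separately
     +	 * as NR_FREE_HC_PAGES. */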
 695 -	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
 696 +	if (part_no == NR_CPUS)
 697 +		__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
 698 +	else
 699 +		__mod_zone_page_state(zone, NR_FREE_HC_PAGES, nr_pages);
 700  	if (is_migrate_cma(migratetype))
 701  		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
 702  }
 703 diff --git include/litmus/budget.h include/litmus/budget.h
 704 index bd2d5c9..60eb814 100644
 705 --- include/litmus/budget.h
 706 +++ include/litmus/budget.h
 707 @@ -33,4 +33,6 @@ static inline int requeue_preempted_job(struct task_struct* t)
 708  		(!budget_exhausted(t) || !budget_enforced(t));
 709  }
 710  
 711 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining);
 712 +
 713  #endif
 714 diff --git include/litmus/cache_proc.h include/litmus/cache_proc.h
 715 new file mode 100644
 716 index 0000000..1d9d443
 717 --- /dev/null
 718 +++ include/litmus/cache_proc.h
 719 @@ -0,0 +1,21 @@
 720 +#ifndef LITMUS_CACHE_PROC_H
 721 +#define LITMUS_CACHE_PROC_H
 722 +
 723 +#ifdef __KERNEL__
 724 +
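     +/* enter_irq_mode()/exit_irq_mode() bracket interrupt handlers (see
     + * handle_IRQ in arch/arm/kernel/irq.c) so that a separate lockdown
     + * configuration can be applied while ISRs run; litmus_setup_lockdown()
     + * is called once from the L2C-310 init path in arch/arm/mm/cache-l2x0.c. */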
 725 +void litmus_setup_lockdown(void __iomem*, u32);
 726 +void enter_irq_mode(void);
 727 +void exit_irq_mode(void);
 728 +void flush_cache(int all);
 729 +void lock_cache(int cpu, u32 val);
 730 +void cache_lockdown(u32 lock_val, int cpu);
 731 +
 732 +extern struct page *new_alloc_page_color(unsigned long color);
 733 +
 734 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end);
 735 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end);
 736 +
 737 +#endif
 738 +
 739 +#endif
 740 +
 741 diff --git include/litmus/litmus.h include/litmus/litmus.h
 742 index a6eb534..441210c 100644
 743 --- include/litmus/litmus.h
 744 +++ include/litmus/litmus.h
 745 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 746  	((current)->state == TASK_RUNNING || 	\
 747  	 preempt_count() & PREEMPT_ACTIVE)
 748  
 749 +#define is_running(t) 			\
 750 +	((t)->state == TASK_RUNNING || 	\
 751 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 752 +
 753 +#define is_blocked(t)       \
 754 +	(!is_running(t))
 755 +
 756  #define is_released(t, now)	\
 757  	(lt_before_eq(get_release(t), now))
 758  #define is_tardy(t, now)    \
 759 diff --git include/litmus/mc2_common.h include/litmus/mc2_common.h
 760 new file mode 100644
 761 index 0000000..e3c0af2
 762 --- /dev/null
 763 +++ include/litmus/mc2_common.h
 764 @@ -0,0 +1,31 @@
 765 +/*
 766 + * MC^2 common data structures
 767 + */
 768 +
 769 +#ifndef __UNC_MC2_COMMON_H__
 770 +#define __UNC_MC2_COMMON_H__
 771 +
 772 +enum crit_level {
 773 +	CRIT_LEVEL_A = 0,
 774 +	CRIT_LEVEL_B = 1,
 775 +	CRIT_LEVEL_C = 2,
 776 +	NUM_CRIT_LEVELS = 3,
 777 +};
 778 +
 779 +struct mc2_task {
 780 +	enum crit_level crit;
 781 +	unsigned int res_id;
 782 +};
 783 +
 784 +#ifdef __KERNEL__
 785 +
 786 +#include <litmus/reservation.h>
 787 +
 788 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
 789 +
 790 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 791 +							struct reservation *res);
 792 +
 793 +#endif /* __KERNEL__ */
 794 +
 795 +#endif
 796 \ No newline at end of file
 797 diff --git include/litmus/page_dev.h include/litmus/page_dev.h
 798 new file mode 100644
 799 index 0000000..c42ed05
 800 --- /dev/null
 801 +++ include/litmus/page_dev.h
 802 @@ -0,0 +1,23 @@
 803 +#ifndef _LITMUS_PAGE_DEV_H
 804 +#define _LITMUS_PAGE_DEV_H
 805 +
 806 +#include <linux/init.h>
 807 +#include <linux/types.h>
 808 +#include <linux/kernel.h>
 809 +#include <linux/module.h>
 810 +#include <linux/sysctl.h>
 811 +#include <linux/slab.h>
 812 +#include <linux/io.h>
 813 +#include <linux/mutex.h>
 814 +
 815 +#include <litmus/sched_trace.h>
 816 +#include <litmus/litmus.h>
 817 +
 818 +int llc_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos);
 819 +int dram_partition_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos);
 820 +int bank_to_partition(unsigned int bank);
 821 +int get_area_index(int cpu);
 822 +int is_in_correct_bank(struct page* page, int cpu);
 823 +int is_in_llc_partition(struct page* page, int cpu);
 824 +
 825 +#endif /* _LITMUS_PAGE_DEV_H */
 826 \ No newline at end of file
 827 diff --git include/litmus/polling_reservations.h include/litmus/polling_reservations.h
 828 new file mode 100644
 829 index 0000000..66c9b1e
 830 --- /dev/null
 831 +++ include/litmus/polling_reservations.h
 832 @@ -0,0 +1,36 @@
 833 +#ifndef LITMUS_POLLING_RESERVATIONS_H
 834 +#define LITMUS_POLLING_RESERVATIONS_H
 835 +
 836 +#include <litmus/reservation.h>
 837 +
 838 +struct polling_reservation {
 839 +	/* extend basic reservation */
 840 +	struct reservation res;
 841 +
 842 +	lt_t max_budget;
 843 +	lt_t period;
 844 +	lt_t deadline;
 845 +	lt_t offset;
 846 +};
 847 +
 848 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
 849 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
 850 +
 851 +struct table_driven_reservation {
 852 +	/* extend basic reservation */
 853 +	struct reservation res;
 854 +
 855 +	lt_t major_cycle;
 856 +	unsigned int next_interval;
 857 +	unsigned int num_intervals;
 858 +	struct lt_interval *intervals;
 859 +
 860 +	/* info about current scheduling slot */
 861 +	struct lt_interval cur_interval;
 862 +	lt_t major_cycle_start;
 863 +};
 864 +
 865 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
 866 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
 867 +
 868 +#endif
 869 diff --git include/litmus/replicate_lib.h include/litmus/replicate_lib.h
 870 new file mode 100644
 871 index 0000000..186837b
 872 --- /dev/null
 873 +++ include/litmus/replicate_lib.h
 874 @@ -0,0 +1,19 @@
 875 +#ifndef LITMUS_REPLICATE_LIB_H
 876 +#define LITMUS_REPLICATE_LIB_H
 877 +
 878 +#include <linux/list.h>
 879 +#include <linux/mm_types.h>
 880 +#include <linux/mm_inline.h>
 881 +
 882 +/* Data structure for the "master" list */
 883 +struct shared_lib_page {
 884 +	struct page *master_page;
 885 +	struct page *r_page[NR_CPUS+1];
 886 +	unsigned long int master_pfn;
 887 +	unsigned long int r_pfn[NR_CPUS+1];
 888 +	struct list_head list;
 889 +};
 890 +
 891 +extern struct list_head shared_lib_pages;
 892 +
 893 +#endif
 894 diff --git include/litmus/reservation.h include/litmus/reservation.h
 895 new file mode 100644
 896 index 0000000..7e022b3
 897 --- /dev/null
 898 +++ include/litmus/reservation.h
 899 @@ -0,0 +1,256 @@
 900 +#ifndef LITMUS_RESERVATION_H
 901 +#define LITMUS_RESERVATION_H
 902 +
 903 +#include <linux/list.h>
 904 +#include <linux/hrtimer.h>
 905 +
 906 +struct reservation_client;
 907 +struct reservation_environment;
 908 +struct reservation;
 909 +
 910 +typedef enum {
 911 +	/* reservation has no clients, is not consuming budget */
 912 +	RESERVATION_INACTIVE = 0,
 913 +
 914 +	/* reservation has clients, consumes budget when scheduled */
 915 +	RESERVATION_ACTIVE,
 916 +
 917 +	/* reservation has no clients, but may be consuming budget */
 918 +	RESERVATION_ACTIVE_IDLE,
 919 +
 920 +	/* Reservation has no budget and waits for
 921 +	 * replenishment. May or may not have clients. */
 922 +	RESERVATION_DEPLETED,
 923 +} reservation_state_t;
 924 +
 925 +
 926 +/* ************************************************************************** */
 927 +
 928 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
 929 + * to schedule right now and background work can be scheduled. */
 930 +typedef struct task_struct * (*dispatch_t)  (
 931 +	struct reservation_client *client
 932 +);
 933 +
 934 +/* Something that can be managed in a reservation and that can yield
 935 + * a process for dispatching. Contains a pointer to the reservation
 936 + * to which it "belongs". */
 937 +struct reservation_client {
 938 +	struct list_head list;
 939 +	struct reservation* reservation;
 940 +	dispatch_t dispatch;
 941 +};
 942 +
 943 +
 944 +/* ************************************************************************** */
 945 +
 946 +/* Called by reservations to request state change. */
 947 +typedef void (*reservation_change_state_t)  (
 948 +	struct reservation_environment* env,
 949 +	struct reservation *res,
 950 +	reservation_state_t new_state
 951 +);
 952 +
 953 +/* The framework within which reservations operate. */
 954 +struct reservation_environment {
 955 +	lt_t time_zero;
 956 +	lt_t current_time;
 957 +
 958 +	/* services invoked by reservations */
 959 +	reservation_change_state_t change_state;
 960 +};
 961 +
 962 +
 963 +/* ************************************************************************** */
 964 +
 965 +/* A new client is added or an existing client resumes. */
 966 +typedef void (*client_arrives_t)  (
 967 +	struct reservation *reservation,
 968 +	struct reservation_client *client
 969 +);
 970 +
 971 +/* A client suspends or terminates. */
 972 +typedef void (*client_departs_t)  (
 973 +	struct reservation *reservation,
 974 +	struct reservation_client *client,
 975 +	int did_signal_job_completion
 976 +);
 977 +
 978 +/* A previously requested replenishment has occurred. */
 979 +typedef void (*on_replenishment_timer_t)  (
 980 +	struct reservation *reservation
 981 +);
 982 +
 983 +/* Update the reservation's budget to reflect execution or idling. */
 984 +typedef void (*drain_budget_t) (
 985 +	struct reservation *reservation,
 986 +	lt_t how_much
 987 +);
 988 +
 989 +/* Select a ready task from one of the clients for scheduling. */
 990 +typedef struct task_struct* (*dispatch_client_t)  (
 991 +	struct reservation *reservation,
 992 +	lt_t *time_slice /* May be used to force rescheduling after
 993 +	                    some amount of time. 0 => no limit */
 994 +);
 995 +
 996 +
 997 +struct reservation_ops {
 998 +	dispatch_client_t dispatch_client;
 999 +
1000 +	client_arrives_t client_arrives;
1001 +	client_departs_t client_departs;
1002 +
1003 +	on_replenishment_timer_t replenish;
1004 +	drain_budget_t drain_budget;
1005 +};
1006 +
1007 +struct reservation {
1008 +	/* used to queue in environment */
1009 +	struct list_head list;
1010 +
1011 +	reservation_state_t state;
1012 +	unsigned int id;
1013 +
1014 +	/* exact meaning defined by impl. */
1015 +	lt_t priority;
1016 +	lt_t cur_budget;
1017 +	lt_t next_replenishment;
1018 +
1019 +	/* budget stats */
1020 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
1021 +	lt_t budget_consumed_total;
1022 +
1023 +	/* interaction with framework */
1024 +	struct reservation_environment *env;
1025 +	struct reservation_ops *ops;
1026 +
1027 +	struct list_head clients;
1028 +
1029 +	/* for global env. */
1030 +	int scheduled_on;
1031 +	int event_added;
1032 +	/* set when blocked by a ghost job; do not charge budget while ACTIVE */
1033 +	int blocked_by_ghost;
1034 +	/* ghost-job flag; if clear, do not charge budget while ACTIVE_IDLE */
1035 +	int is_ghost;
1036 +};
1037 +
1038 +void reservation_init(struct reservation *res);
1039 +
1040 +/* Default implementations */
1041 +
1042 +/* simply select the first client in the list, set *for_at_most to zero */
1043 +struct task_struct* default_dispatch_client(
1044 +	struct reservation *res,
1045 +	lt_t *for_at_most
1046 +);
1047 +
1048 +/* "connector" reservation client to hook up tasks with reservations */
1049 +struct task_client {
1050 +	struct reservation_client client;
1051 +	struct task_struct *task;
1052 +};
1053 +
1054 +void task_client_init(struct task_client *tc, struct task_struct *task,
1055 +	struct reservation *reservation);
1056 +
1057 +#define SUP_RESCHEDULE_NOW (0)
1058 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
1059 +
1060 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
1061 + * environment.
1062 + */
1063 +struct sup_reservation_environment {
1064 +	struct reservation_environment env;
1065 +
1066 +	/* ordered by priority */
1067 +	struct list_head active_reservations;
1068 +
1069 +	/* ordered by next_replenishment */
1070 +	struct list_head depleted_reservations;
1071 +
1072 +	/* unordered */
1073 +	struct list_head inactive_reservations;
1074 +
1075 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
1076 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
1077 +	 * any other value means program a timer for the given time
1078 +	 */
1079 +	lt_t next_scheduler_update;
1080 +	/* set to true if a call to sup_dispatch() is imminent */
1081 +	bool will_schedule;
1082 +};
1083 +
1084 +/* Contract:
1085 + *  - before calling into sup_ code, or any reservation methods,
1086 + *    update the time with sup_update_time(); and
1087 + *  - after calling into sup_ code, or any reservation methods,
1088 + *    check next_scheduler_update and program timer or trigger
1089 + *    scheduler invocation accordingly.
1090 + */
1091 +
1092 +void sup_init(struct sup_reservation_environment* sup_env);
1093 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
1094 +	struct reservation* new_res);
1095 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
1096 +	lt_t timeout);
1097 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
1098 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
1099 +
1100 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
1101 +	unsigned int id);
1102 +
1103 +/* A global multiprocessor reservation environment. */
1104 +
1105 +typedef enum {
1106 +	EVENT_REPLENISH = 0,
1107 +	EVENT_DRAIN,
1108 +	EVENT_OTHERS,
1109 +} event_type_t;
1110 +
1111 +
1112 +struct next_timer_event {
1113 +	lt_t next_update;
1114 +	int timer_armed_on;
1115 +	unsigned int id;
1116 +	event_type_t type;
1117 +	struct list_head list;
1118 +};
1119 +
1120 +struct gmp_reservation_environment {
1121 +	raw_spinlock_t lock;
1122 +	struct reservation_environment env;
1123 +
1124 +	/* ordered by priority */
1125 +	struct list_head active_reservations;
1126 +
1127 +	/* ordered by next_replenishment */
1128 +	struct list_head depleted_reservations;
1129 +
1130 +	/* unordered */
1131 +	struct list_head inactive_reservations;
1132 +
1133 +	/* timer event ordered by next_update */
1134 +	struct list_head next_events;
1135 +
1136 +	/* (schedule_now == true) means call gmp_dispatch() now */
1137 +	int schedule_now;
1138 +	/* set to true if a call to gmp_dispatch() is imminent */
1139 +	bool will_schedule;
1140 +};
1141 +
1142 +void gmp_init(struct gmp_reservation_environment* gmp_env);
1143 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
1144 +	struct reservation* new_res);
1145 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
1146 +	lt_t timeout, unsigned int id, event_type_t type);
1147 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
1148 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
1149 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
1150 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
1151 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
1152 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
1153 +	unsigned int id);
1154 +
1155 +#endif
1156 diff --git include/litmus/rt_param.h include/litmus/rt_param.h
1157 index 7b9a909..2044327 100644
1158 --- include/litmus/rt_param.h
1159 +++ include/litmus/rt_param.h
1160 @@ -62,6 +62,7 @@ typedef enum {
1161  #define LITMUS_MAX_PRIORITY     512
1162  #define LITMUS_HIGHEST_PRIORITY   1
1163  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
1164 +#define LITMUS_NO_PRIORITY		UINT_MAX
1165  
1166  /* Provide generic comparison macros for userspace,
1167   * in case that we change this later. */
1168 @@ -71,6 +72,46 @@ typedef enum {
1169  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
1170  	 (p) <= LITMUS_LOWEST_PRIORITY)
1171  
1172 +/* reservation support */
1173 +
1174 +typedef enum {
1175 +	PERIODIC_POLLING,
1176 +	SPORADIC_POLLING,
1177 +	TABLE_DRIVEN,
1178 +} reservation_type_t;
1179 +
1180 +struct lt_interval {
1181 +	lt_t start;
1182 +	lt_t end;
1183 +};
1184 +
1185 +#ifndef __KERNEL__
1186 +#define __user
1187 +#endif
1188 +
1189 +struct reservation_config {
1190 +	unsigned int id;
1191 +	lt_t priority;
1192 +	int  cpu;
1193 +
1194 +	union {
1195 +		struct {
1196 +			lt_t period;
1197 +			lt_t budget;
1198 +			lt_t relative_deadline;
1199 +			lt_t offset;
1200 +		} polling_params;
1201 +
1202 +		struct {
1203 +			lt_t major_cycle_length;
1204 +			unsigned int num_intervals;
1205 +			struct lt_interval __user *intervals;
1206 +		} table_driven_params;
1207 +	};
1208 +};
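     +
     +/* Illustrative example: a polling reservation with a 10 ms budget every
     + * 50 ms on CPU 0 sets cpu = 0, polling_params.budget = 10000000, and
     + * polling_params.period = 50000000 (lt_t values are in nanoseconds). */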
1209 +
1210 +/* regular sporadic task support */
1211 +
1212  struct rt_task {
1213  	lt_t 		exec_cost;
1214  	lt_t 		period;
1215 @@ -120,7 +161,6 @@ struct control_page {
1216  	uint64_t ts_syscall_start;  /* Feather-Trace cycles */
1217  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
1218  				     * started. */
1219 -
1220  	/* to be extended */
1221  };
1222  
1223 @@ -165,6 +205,7 @@ struct rt_job {
1224  };
1225  
1226  struct pfair_param;
1227 +struct mc2_task;
1228  
1229  /*	RT task parameters for scheduling extensions
1230   *	These parameters are inherited during clone and therefore must
1231 @@ -246,7 +287,10 @@ struct rt_param {
1232  	volatile int		linked_on;
1233  
1234  	/* PFAIR/PD^2 state. Allocated on demand. */
1235 -	struct pfair_param*	pfair;
1236 +	union {
1237 +		void *plugin_state;
1238 +		struct pfair_param *pfair;
1239 +	};
1240  
1241  	/* Fields saved before BE->RT transition.
1242  	 */
1243 @@ -275,6 +319,10 @@ struct rt_param {
1244  
1245  	/* Pointer to the page shared between userspace and kernel. */
1246  	struct control_page * ctrl_page;
1247 +
1248 +	/* Mixed-criticality specific data */
1249 +	struct mc2_task* mc2_data;
1250 +	unsigned long addr_ctrl_page;
1251  };
1252  
1253  #endif
1254 diff --git include/litmus/sched_plugin.h include/litmus/sched_plugin.h
1255 index 0ccccd6..4c8aaa6 100644
1256 --- include/litmus/sched_plugin.h
1257 +++ include/litmus/sched_plugin.h
1258 @@ -77,6 +77,17 @@ typedef long (*wait_for_release_at_t)(lt_t release_time);
1259  /* Informs the plugin when a synchronous release takes place. */
1260  typedef void (*synchronous_release_at_t)(lt_t time_zero);
1261  
1262 +/* How much budget has the current task consumed so far, and how much
1263 + * has it left? The default implementation ties into the per-task
1264 + * budget enforcement code. Plugins can override this to report
1265 + * reservation-specific values. */
1266 +typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
1267 +
1268 +/* Reservation creation/removal backends. Meaning of reservation_type and
1269 + * reservation_id are entirely plugin-specific. */
1270 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
1271 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
1272 +
1273  /************************ misc routines ***********************/
1274  
1275  
1276 @@ -109,6 +120,12 @@ struct sched_plugin {
1277  	task_exit_t 		task_exit;
1278  	task_cleanup_t		task_cleanup;
1279  
1280 +	current_budget_t	current_budget;
1281 +
1282 +	/* Reservation support */
1283 +	reservation_create_t	reservation_create;
1284 +	reservation_destroy_t	reservation_destroy;
1285 +
1286  #ifdef CONFIG_LITMUS_LOCKING
1287  	/*	locking protocols	*/
1288  	allocate_lock_t		allocate_lock;
1289 diff --git include/litmus/sched_trace.h include/litmus/sched_trace.h
1290 index 82bde82..1b03778 100644
1291 --- include/litmus/sched_trace.h
1292 +++ include/litmus/sched_trace.h
1293 @@ -23,8 +23,7 @@ struct st_param_data {		/* regular params */
1294  	u32	period;
1295  	u32	phase;
1296  	u8	partition;
1297 -	u8	class;
1298 -	u8	__unused[2];
1299 +	u8	__unused[3];
1300  };
1301  
1302  struct st_release_data {	/* A job is was/is going to be released. */
1303 @@ -35,14 +34,12 @@ struct st_release_data {	/* A job is was/is going to be released. */
1304  struct st_assigned_data {	/* A job was asigned to a CPU. 		 */
1305  	u64	when;
1306  	u8	target;		/* Where should it execute?	         */
1307 -	u8	__unused[7];
1308 +	u8	__unused[3];
1309  };
1310  
1311  struct st_switch_to_data {	/* A process was switched to on a given CPU.   */
1312  	u64	when;		/* When did this occur?                        */
1313  	u32	exec_time;	/* Time the current job has executed.          */
1314 -	u8	__unused[4];
1315 -
1316  };
1317  
1318  struct st_switch_away_data {	/* A process was switched away from on a given CPU. */
1319 @@ -52,11 +49,10 @@ struct st_switch_away_data {	/* A process was switched away from on a given CPU.
1320  
1321  struct st_completion_data {	/* A job completed. */
1322  	u64	when;
1323 -	u8	forced:1; 	/* Set to 1 if job overran and kernel advanced to the
1324 +	u64	forced:1; 	/* Set to 1 if job overran and kernel advanced to the
1325  				 * next task automatically; set to 0 otherwise.
1326  				 */
1327 -	u8	__uflags:7;
1328 -	u8	__unused[7];
1329 +	u64	exec_time:63;
1330  };
1331  
1332  struct st_block_data {		/* A task blocks. */
1333 @@ -69,6 +65,16 @@ struct st_resume_data {		/* A task resumes. */
1334  	u64	__unused;
1335  };
1336  
1337 +struct st_np_enter_data {       /* A task becomes non-preemptable.  */
1338 +	u64     when;
1339 +	u64     __unused;
1340 +};
1341 +
1342 +struct st_np_exit_data {        /* A task becomes preemptable again. */
1343 +	u64     when;
1344 +	u64     __unused;
1345 +};
1346 +
1347  struct st_action_data {
1348  	u64	when;
1349  	u8	action;
1350 @@ -94,7 +100,10 @@ typedef enum {
1351  	ST_BLOCK,
1352  	ST_RESUME,
1353  	ST_ACTION,
1354 -	ST_SYS_RELEASE
1355 +	ST_SYS_RELEASE,
1356 +	ST_NP_ENTER,
1357 +	ST_NP_EXIT,
1358 +	ST_INVALID
1359  } st_event_record_type_t;
1360  
1361  struct st_event_record {
1362 @@ -113,6 +122,8 @@ struct st_event_record {
1363  		DATA(resume);
1364  		DATA(action);
1365  		DATA(sys_release);
1366 +		DATA(np_enter);
1367 +		DATA(np_exit);
1368  	} data;
1369  };
1370  
1371 diff --git include/litmus/trace.h include/litmus/trace.h
1372 index 6017872..24ca412 100644
1373 --- include/litmus/trace.h
1374 +++ include/litmus/trace.h
1375 @@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
1376  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
1377  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
1378  
1379 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
1380 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
1381 +
1382  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
1383  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
1384  
1385 @@ -137,6 +140,17 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
1386  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
1387  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
1388  
1389 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
1390 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
1391 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
1392 +
1393 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
1394 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
1395 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
1396 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
1397 +
1398 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
1399 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
1400 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
1401 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
1402  
1403  #endif /* !_SYS_TRACE_H_ */
1404 diff --git include/litmus/unistd_32.h include/litmus/unistd_32.h
1405 index 94264c2..664f73f 100644
1406 --- include/litmus/unistd_32.h
1407 +++ include/litmus/unistd_32.h
1408 @@ -17,5 +17,14 @@
1409  #define __NR_wait_for_ts_release __LSC(9)
1410  #define __NR_release_ts		__LSC(10)
1411  #define __NR_null_call		__LSC(11)
1412 +#define __NR_get_current_budget __LSC(12)
1413 +#define __NR_reservation_create	__LSC(13)
1414 +#define __NR_reservation_destroy	__LSC(14)
1415 +#define __NR_set_mc2_task_param	__LSC(15)
1416 +#define __NR_set_page_color		__LSC(16)
1417 +#define __NR_test_call		__LSC(17)
1418 +#define __NR_run_test		__LSC(18)
1419 +#define __NR_lock_buffer	__LSC(19)
1420 +#define __NR_recolor_mem	__LSC(20)
1421  
1422 -#define NR_litmus_syscalls 12
1423 +#define NR_litmus_syscalls	21
1424 diff --git include/litmus/unistd_64.h include/litmus/unistd_64.h
1425 index d5ced0d..994e8a5 100644
1426 --- include/litmus/unistd_64.h
1427 +++ include/litmus/unistd_64.h
1428 @@ -29,5 +29,23 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
1429  __SYSCALL(__NR_release_ts, sys_release_ts)
1430  #define __NR_null_call				__LSC(11)
1431  __SYSCALL(__NR_null_call, sys_null_call)
1432 +#define __NR_get_current_budget			__LSC(12)
1433 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
1434 +#define __NR_reservation_create		__LSC(13)
1435 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
1436 +#define __NR_reservation_destroy	__LSC(14)
1437 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
1438 +#define __NR_set_mc2_task_param		__LSC(15)
1439 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
1440 +#define __NR_set_page_color			__LSC(16)
1441 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
1442 +#define __NR_test_call				__LSC(17)
1443 +__SYSCALL(__NR_test_call, sys_test_call)
1444 +#define __NR_run_test				__LSC(18)
1445 +__SYSCALL(__NR_run_test, sys_run_test)
1446 +#define __NR_lock_buffer			__LSC(19)
1447 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
1448 +#define __NR_recolor_mem			__LSC(20)
1449 +__SYSCALL(__NR_recolor_mem, sys_recolor_mem)
1450  
1451 -#define NR_litmus_syscalls 12
1452 +#define NR_litmus_syscalls 21
1453 diff --git ipc/mqueue.c ipc/mqueue.c
1454 index 3aaea7f..4cb1b7f 100644
1455 --- ipc/mqueue.c
1456 +++ ipc/mqueue.c
1457 @@ -1004,7 +1004,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1458  
1459  	/* First try to allocate memory, before doing anything with
1460  	 * existing queues. */
1461 -	msg_ptr = load_msg(u_msg_ptr, msg_len);
1462 +	msg_ptr = load_msg(u_msg_ptr, msg_len, 0);
1463  	if (IS_ERR(msg_ptr)) {
1464  		ret = PTR_ERR(msg_ptr);
1465  		goto out_fput;
1466 diff --git ipc/msg.c ipc/msg.c
1467 index 2b6fdbb..da2b3e9 100644
1468 --- ipc/msg.c
1469 +++ ipc/msg.c
1470 @@ -38,6 +38,10 @@
1471  #include <linux/nsproxy.h>
1472  #include <linux/ipc_namespace.h>
1473  
1474 +#include <litmus/trace.h>
1475 +#include <litmus/litmus.h>
1476 +#include <asm/cacheflush.h>
1477 +
1478  #include <asm/current.h>
1479  #include <linux/uaccess.h>
1480  #include "util.h"
1481 @@ -619,7 +623,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
1482  	if (mtype < 1)
1483  		return -EINVAL;
1484  
1485 -	msg = load_msg(mtext, msgsz);
1486 +	msg = load_msg(mtext, msgsz, mtype);
1487  	if (IS_ERR(msg))
1488  		return PTR_ERR(msg);
1489  
1490 @@ -752,13 +756,13 @@ static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
1491  {
1492  	struct msgbuf __user *msgp = dest;
1493  	size_t msgsz;
1494 -
1495  	if (put_user(msg->m_type, &msgp->mtype))
1496  		return -EFAULT;
1497 -
1498  	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
1499 +
1500  	if (store_msg(msgp->mtext, msg, msgsz))
1501  		return -EFAULT;
1502 +
1503  	return msgsz;
1504  }
1505  
1506 @@ -976,7 +980,6 @@ out_unlock1:
1507  		free_copy(copy);
1508  		return PTR_ERR(msg);
1509  	}
1510 -
1511  	bufsz = msg_handler(buf, msg, bufsz);
1512  	free_msg(msg);
1513  
1514 diff --git ipc/msgutil.c ipc/msgutil.c
1515 index 2b49159..1ffc1b2 100644
1516 --- ipc/msgutil.c
1517 +++ ipc/msgutil.c
1518 @@ -18,6 +18,12 @@
1519  #include <linux/utsname.h>
1520  #include <linux/proc_ns.h>
1521  #include <linux/uaccess.h>
1522 +#include <linux/dma-mapping.h>
1523 +#include <linux/mempool.h>
1524 +#include <litmus/cache_proc.h>
1525 +#include <litmus/trace.h>
1526 +#include <litmus/litmus.h>
1527 +#include <asm/cacheflush.h>
1528  
1529  #include "util.h"
1530  
1531 @@ -42,20 +48,35 @@ atomic_t nr_ipc_ns = ATOMIC_INIT(1);
1532  struct msg_msgseg {
1533  	struct msg_msgseg *next;
1534  	/* the next part of the message follows immediately */
1535 +	dma_addr_t	seg_handle;
1536 +	size_t seg_len;
1537  };
1538  
1539  #define DATALEN_MSG	((size_t)PAGE_SIZE-sizeof(struct msg_msg))
1540  #define DATALEN_SEG	((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
1541  
1542 +static dma_addr_t handle;
1543  
1544 -static struct msg_msg *alloc_msg(size_t len)
1545 +mempool_t *msgpool;
1546 +extern void *msgvaddr;
1547 +
1548 +static struct msg_msg *alloc_msg(size_t len, long mtype)
1549  {
1550  	struct msg_msg *msg;
1551  	struct msg_msgseg **pseg;
1552  	size_t alen;
1553 +	int n_seg = 0;
1554  
1555  	alen = min(len, DATALEN_MSG);
1556 -	msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
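     +	/* mtype selects the allocation path in this prototype: 1 = DMA-coherent
     +	 * colored memory, 2 = colored kmalloc(), anything else falls back to
     +	 * the preallocated msgvaddr buffer. */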
1557 +	if (mtype == 1) {
1558 +		msg = dma_alloc_coherent(NULL, sizeof(*msg) + alen, &handle, GFP_KERNEL|GFP_COLOR);
     +		if (msg) {
1559 +			msg->handle = handle;
1560 +			msg->alloc_len = sizeof(*msg) + alen;
     +		}
1561 +	} else if (mtype == 2) {
1562 +		msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL|GFP_COLOR);
1563 +	} else {
1564 +		msg = msgvaddr;
1565 +	}
1566  	if (msg == NULL)
1567  		return NULL;
1568  
1569 @@ -67,7 +88,16 @@ static struct msg_msg *alloc_msg(size_t len)
1570  	while (len > 0) {
1571  		struct msg_msgseg *seg;
1572  		alen = min(len, DATALEN_SEG);
1573 -		seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
1574 +		if (mtype == 1) {
1575 +			seg = dma_alloc_coherent(NULL, sizeof(*seg) + alen, &handle, GFP_KERNEL|GFP_COLOR);
     +			if (seg) {
1576 +				seg->seg_handle = handle;
1577 +				seg->seg_len = alen;
     +			}
1578 +		} else if (mtype == 2) {
1579 +			seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL|GFP_COLOR);
1580 +		} else {
1581 +			n_seg++;
1582 +			seg = msgvaddr + PAGE_SIZE*n_seg;
1583 +		}
1584  		if (seg == NULL)
1585  			goto out_err;
1586  		*pseg = seg;
1587 @@ -83,18 +117,19 @@ out_err:
1588  	return NULL;
1589  }
1590  
1591 -struct msg_msg *load_msg(const void __user *src, size_t len)
1592 +struct msg_msg *load_msg(const void __user *src, size_t len, long mtype)
1593  {
1594  	struct msg_msg *msg;
1595  	struct msg_msgseg *seg;
1596  	int err = -EFAULT;
1597  	size_t alen;
1598  
1599 -	msg = alloc_msg(len);
1600 +	msg = alloc_msg(len, mtype);
1601  	if (msg == NULL)
1602  		return ERR_PTR(-ENOMEM);
1603  
1604  	alen = min(len, DATALEN_MSG);
1605 +
1606  	if (copy_from_user(msg + 1, src, alen))
1607  		goto out_err;
1608  
1609 @@ -105,6 +140,9 @@ struct msg_msg *load_msg(const void __user *src, size_t len)
1610  		if (copy_from_user(seg + 1, src, alen))
1611  			goto out_err;
1612  	}
1613 +	if (mtype == 3) {
1614 +		cache_lockdown(0xFFFF8000, smp_processor_id());
1615 +	}
1616  
1617  	err = security_msg_msg_alloc(msg);
1618  	if (err)
1619 @@ -172,14 +210,22 @@ int store_msg(void __user *dest, struct msg_msg *msg, size_t len)
1620  void free_msg(struct msg_msg *msg)
1621  {
1622  	struct msg_msgseg *seg;
1623 -
1624 +	long mtype = msg->m_type;
1625 +
1626  	security_msg_msg_free(msg);
1627  
1628  	seg = msg->next;
1629 -	kfree(msg);
1630 +	if (mtype == 1) {
1631 +		dma_free_coherent(NULL, msg->alloc_len, msg, msg->handle);
1632 +	} else if (mtype != 3) {
1633 +		kfree(msg);
1634 +	}
1635  	while (seg != NULL) {
1636  		struct msg_msgseg *tmp = seg->next;
1637 -		kfree(seg);
1638 +		if (mtype == 1) {
1639 +			dma_free_coherent(NULL, sizeof(*seg)+(seg->seg_len), seg, seg->seg_handle);
1640 +		} else if (mtype != 3)
1641 +			kfree(seg);
1642  		seg = tmp;
1643  	}
1644  }
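
Taken together, the ipc/msg.c and ipc/msgutil.c hunks make the SysV message type double as an allocation selector: mtype 1 backs the message with colored DMA-coherent memory, mtype 2 with a colored kmalloc, and any other value with the preallocated msgvaddr buffer (mtype 3 additionally locks the copied message into the cache via cache_lockdown()). A minimal user-space sketch of how a sender would exercise the mtype 1 path (error handling omitted):

/* Sketch only: the mtype field selects the kernel allocation path. */
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct msgbuf_ex {
	long mtype;	/* 1: colored DMA-coherent, 2: colored kmalloc, 3: locked buffer */
	char mtext[64];
};

int main(void)
{
	int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct msgbuf_ex m = { .mtype = 1 };

	strcpy(m.mtext, "hello");
	msgsnd(qid, &m, sizeof(m.mtext), 0);	/* kernel: load_msg(..., mtype = 1) */
	msgrcv(qid, &m, sizeof(m.mtext), 1, 0);	/* kernel: free_msg() -> dma_free_coherent() */
	return 0;
}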
1645 diff --git ipc/util.h ipc/util.h
1646 index 1a5a0fc..9a29983 100644
1647 --- ipc/util.h
1648 +++ ipc/util.h
1649 @@ -149,7 +149,7 @@ int ipc_parse_version(int *cmd);
1650  #endif
1651  
1652  extern void free_msg(struct msg_msg *msg);
1653 -extern struct msg_msg *load_msg(const void __user *src, size_t len);
1654 +extern struct msg_msg *load_msg(const void __user *src, size_t len, long mtype);
1655  extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst);
1656  extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
1657  
1658 diff --git kernel/sched/litmus.c kernel/sched/litmus.c
1659 index 9d58690..cd36358 100644
1660 --- kernel/sched/litmus.c
1661 +++ kernel/sched/litmus.c
1662 @@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
1663  	/* task counter */
1664  	p->se.sum_exec_runtime += delta;
1665  	if (delta) {
1666 -		TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1667 -			delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1668 +		//TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1669 +			//delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1670 +		;
1671  	}
1672  	/* sched_clock() */
1673  	p->se.exec_start = rq->clock;
1674 diff --git litmus/Kconfig litmus/Kconfig
1675 index babb43d..8f38bc3 100644
1676 --- litmus/Kconfig
1677 +++ litmus/Kconfig
1678 @@ -372,4 +372,26 @@ config PREEMPT_STATE_TRACE
1679  
1680  endmenu
1681  
1682 +menu "Memory Management"
1683 +
1684 +choice
1685 +	prompt "UVC buffer allocation"
1686 +	default	UVC_ALLOC_LEVEL_C
1687 +	help
1688 +	  Determine the location of frame buffers used by UVC (USB Video Class) devices. Other data structures, such as URBs (USB Request Blocks) and V4L2 (Video for Linux version 2) buffers, use Level-C/OS banks.
1689 +
1690 +	config UVC_ALLOC_LEVEL_C
1691 +		bool "Level-C/OS"
1692 +		help
1693 +		  Allocate frame buffers in Level-C/OS banks.
1694 +
1695 +	config UVC_ALLOC_LEVEL_AB
1696 +		bool "Level-A/B"
1697 +		help
1698 +		  Allocate frame buffers in the task's Level-A/B bank. If this option is selected, the task that uses a camera must be a Level-A or -B task.
1699 +
1700 +endchoice
1701 +
1702 +endmenu
1703 +
1704  endmenu
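
The two choice symbols map directly onto .config entries; for example, selecting the Level-A/B placement yields:

# Example .config fragment for the choice above
CONFIG_UVC_ALLOC_LEVEL_AB=y
# CONFIG_UVC_ALLOC_LEVEL_C is not set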
1705 diff --git litmus/Makefile litmus/Makefile
1706 index 7970cd5..29ae4b0 100644
1707 --- litmus/Makefile
1708 +++ litmus/Makefile
1709 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
1710  	    sync.o \
1711  	    rt_domain.o \
1712  	    edf_common.o \
1713 +	    mc2_common.o \
1714  	    fp_common.o \
1715  	    fdso.o \
1716  	    locking.o \
1717 @@ -19,13 +20,20 @@ obj-y     = sched_plugin.o litmus.o \
1718  	    binheap.o \
1719  	    ctrldev.o \
1720  	    uncachedev.o \
1721 +	    reservation.o \
1722 +	    polling_reservations.o \
1723  	    sched_gsn_edf.o \
1724  	    sched_psn_edf.o \
1725 -	    sched_pfp.o
1726 +	    sched_pfp.o \
1727 +	    sched_mc2.o \
1728 +	    bank_proc.o \
1729 +	    color_shm.o \
1730 +	    replicate_lib.o \
1731 +	    cache_proc.o \
1732 +	    page_dev.o
1733  
1734  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
1735  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
1736 -
1737  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
1738  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
1739  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
1740 diff --git litmus/bank_proc.c litmus/bank_proc.c
1741 new file mode 100644
1742 index 0000000..3c4f703
1743 --- /dev/null
1744 +++ litmus/bank_proc.c
1745 @@ -0,0 +1,775 @@
1746 +/*
1747 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
1748 + *                This file maintains a pool of colored pages; users can request
1749 + *                pages with a specific color or bank number.
1750 + *                Part of the code is adapted from Jonathan Herman's code.
1751 + */
1752 +#include <linux/init.h>
1753 +#include <linux/types.h>
1754 +#include <linux/kernel.h>
1755 +#include <linux/module.h>
1756 +#include <linux/sysctl.h>
1757 +#include <linux/slab.h>
1758 +#include <linux/io.h>
1759 +#include <linux/mutex.h>
1760 +#include <linux/mm.h>
1761 +#include <linux/random.h>
1762 +
1763 +#include <litmus/litmus_proc.h>
1764 +#include <litmus/sched_trace.h>
1765 +#include <litmus/litmus.h>
1766 +
1767 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
1768 +
1769 +// This Address Decoding is used in imx6-sabredsd platform
1770 +#define BANK_MASK  0x38000000     
1771 +#define BANK_SHIFT  27
1772 +#define CACHE_MASK  0x0000f000      
1773 +#define CACHE_SHIFT 12
1774 +
1775 +#define PAGES_PER_COLOR 2000
1776 +#define NUM_BANKS	8
1777 +#define NUM_COLORS	16
1778 +
1779 +unsigned int NUM_PAGE_LIST;  //8*16
1780 +
1781 +unsigned int number_banks;
1782 +unsigned int number_cachecolors;
1783 +
1784 +unsigned int set_partition_max = 0x0000ffff;
1785 +unsigned int set_partition_min = 0;
1786 +unsigned int bank_partition_max = 0x000000ff;
1787 +unsigned int bank_partition_min = 0;
1788 +
1789 +int show_page_pool = 0;
1790 +int refill_page_pool = 0;
1791 +spinlock_t reclaim_lock;
1792 +
1793 +unsigned int set_partition[9] = {
1794 +        0x00000003,  /* Core 0, and Level A*/
1795 +        0x00000003,  /* Core 0, and Level B*/
1796 +        0x0000000C,  /* Core 1, and Level A*/
1797 +        0x0000000C,  /* Core 1, and Level B*/
1798 +        0x00000030,  /* Core 2, and Level A*/
1799 +        0x00000030,  /* Core 2, and Level B*/
1800 +        0x000000C0,  /* Core 3, and Level A*/
1801 +        0x000000C0,  /* Core 3, and Level B*/
1802 +        0x0000ff00,  /* Level C */
1803 +};
1804 +
1805 +unsigned int bank_partition[9] = {
1806 +        0x00000010,  /* Core 0, and Level A*/
1807 +        0x00000010,  /* Core 0, and Level B*/
1808 +        0x00000020,  /* Core 1, and Level A*/
1809 +        0x00000020,  /* Core 1, and Level B*/
1810 +        0x00000040,  /* Core 2, and Level A*/
1811 +        0x00000040,  /* Core 2, and Level B*/
1812 +        0x00000080,  /* Core 3, and Level A*/
1813 +        0x00000080,  /* Core 3, and Level B*/
1814 +        0x0000000c,  /* Level C */
1815 +};
1816 +
1817 +unsigned int set_index[9] = {
1818 +    0, 0, 0, 0, 0, 0, 0, 0, 0
1819 +};
1820 +
1821 +unsigned int bank_index[9] = {
1822 +    0, 0, 0, 0, 0, 0, 0, 0, 0
1823 +};
1824 +
1825 +int node_index[9] = {
1826 +    -1, -1, -1, -1, -1, -1, -1, -1, -1
1827 +};
1828 +
1829 +struct mutex void_lockdown_proc;
1830 +
1831 +/*
1832 + * Every page list contains a lock, a list head, and a count of how many pages it stores.
1833 + */ 
1834 +struct color_group {
1835 +	spinlock_t lock;
1836 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
1837 +	struct list_head list;
1838 +	atomic_t nr_pages;
1839 +};
1840 +
1841 +
1842 +static struct color_group *color_groups;
1843 +
1844 +/*
1845 + * Naive function to count the number of 1's
1846 + */
1847 +unsigned int counting_one_set(unsigned int v)
1848 +{
1849 +    unsigned int c; // c accumulates the total bits set in v
1850 +
1851 +    for (c = 0; v; v >>= 1)
1852 +    {
1853 +        c += v & 1;
1854 +    }
1855 +    return c;
1856 +}
1857 +
1858 +unsigned int two_exp(unsigned int e)
1859 +{
1860 +    unsigned int v = 1;
1861 +    for (; e>0; e-- )
1862 +    {
1863 +        v=v*2;
1864 +    }
1865 +    return v;
1866 +}
1867 +
1868 +/* helper functions to find the next colored pool index */
1869 +static inline unsigned int first_index(unsigned long node)
1870 +{
1871 +	unsigned int bank_no = 0, color_no = 0;
1872 +	
1873 +	while(bank_no < NUM_BANKS) {
1874 +		if ((bank_partition[node]>>bank_no) & 0x1)
1875 +			break;
1876 +		bank_no++;
1877 +	}
1878 +	while(color_no < NUM_COLORS) {
1879 +		if ((set_partition[node]>>color_no) & 0x1)
1880 +			break;
1881 +		color_no++;
1882 +	}
1883 +	return NUM_COLORS*bank_no + color_no; 
1884 +}
1885 +
1886 +static inline unsigned int last_index(unsigned long node)
1887 +{
1888 +	unsigned int bank_no = NUM_BANKS-1, color_no = NUM_COLORS-1;
1889 +	
1890 +	while(bank_no >= 0) {
1891 +		if ((bank_partition[node]>>bank_no) & 0x1)
1892 +			break;
1893 +		bank_no--;
1894 +	}
1895 +	while(color_no >= 0) {
1896 +		if ((set_partition[node]>>color_no) & 0x1)
1897 +			break;
1898 +		color_no--;
1899 +	}
1900 +	return NUM_COLORS*bank_no + color_no; 
1901 +}
1902 +
1903 +static inline unsigned int next_color(unsigned long node, unsigned int current_color)
1904 +{
1905 +	int try = 0, ret = 0;
1906 +	current_color++;
1907 +	if (current_color == NUM_COLORS) {
1908 +		current_color = 0;
1909 +		ret = 1;
1910 +	}
1911 +	
1912 +	while (try < NUM_COLORS) {
1913 +		if ((set_partition[node]>>current_color)&0x1)
1914 +			break;
1915 +		current_color++;
1916 +		if (current_color == NUM_COLORS) {
1917 +			current_color = 0;
1918 +			ret = 1;
1919 +		}
1920 +		try++;
1921 +	}
1922 +	if (!ret)
1923 +		return current_color;
1924 +	else
1925 +		return current_color + NUM_COLORS;
1926 +}
1927 +
1928 +static inline unsigned int next_bank(unsigned long node, unsigned int current_bank)
1929 +{
1930 +	int try = 0;
1931 +	current_bank++;
1932 +	if (current_bank == NUM_BANKS) {
1933 +		current_bank = 0;
1934 +	}
1935 +	
1936 +	while (try < NUM_BANKS) {
1937 +		if ((bank_partition[node]>>current_bank)&0x1)
1938 +			break;
1939 +		current_bank++;
1940 +		if (current_bank == NUM_BANKS) {
1941 +			current_bank = 0;
1942 +		}
1943 +		try++;
1944 +	}
1945 +	return current_bank;
1946 +}
1947 +
1948 +static inline unsigned int get_next_index(unsigned long node, unsigned int current_index)
1949 +{
1950 +	unsigned int bank_no, color_no, color_ret, bank_ret;
1951 +	bank_no = current_index>>4; // 2^4 = 16 colors
1952 +	color_no = current_index - bank_no*NUM_COLORS;
1953 +	bank_ret = bank_no;
1954 +	color_ret = next_color(node, color_no);
1955 +	if (color_ret >= NUM_COLORS) {
1956 +		// next bank
1957 +		color_ret -= NUM_COLORS;
1958 +		bank_ret = next_bank(node, bank_no);
1959 +	}
1960 +
1961 +	return bank_ret * NUM_COLORS + color_ret;
1962 +}
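/*
 * Worked example (a sketch, using the default Level-C masks defined above:
 * set_partition[8] = 0x0000ff00, i.e. colors 8..15, and
 * bank_partition[8] = 0x0000000c, i.e. banks 2 and 3): indices are
 * bank*NUM_COLORS + color, so repeated get_next_index(8, idx) calls walk
 * 40,41,...,47 (bank 2, colors 8..15); next_color() then wraps (returning
 * color + NUM_COLORS), next_bank() advances to bank 3, and the walk
 * continues at 56,...,63 before cycling back to 40.
 */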
1963 +
1964 +/* Decoding page color, 0~15 */ 
1965 +static inline unsigned int page_color(struct page *page)
1966 +{
1967 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
1968 +}
1969 +
1970 +/* Decoding page bank number, 0~7 */ 
1971 +static inline unsigned int page_bank(struct page *page)
1972 +{
1973 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
1974 +}
1975 +
1976 +static inline unsigned int page_list_index(struct page *page)
1977 +{
1978 +    unsigned int idx;  
1979 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
1980 +
1981 +    return idx; 
1982 +}
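/*
 * Example of the i.MX6 address decoding above (a sketch; the physical
 * address is arbitrary):
 *   phys  = 0x10805000
 *   bank  = (phys & BANK_MASK)  >> BANK_SHIFT  = 2
 *   color = (phys & CACHE_MASK) >> CACHE_SHIFT = 5
 *   index = color + bank * number_cachecolors  = 5 + 2*16 = 37
 * so this page is kept on page list 37 of the 128 (8 banks x 16 colors).
 */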
1983 +
1984 +
1985 +
1986 +/*
1987 + * Determine the smallest number of pages held by any page list (scanning from index 32 onward).
1988 + */
1989 +static unsigned long smallest_nr_pages(void)
1990 +{
1991 +	unsigned long i, min_pages;
1992 +	struct color_group *cgroup;
1993 +	cgroup = &color_groups[16*2];
1994 +	min_pages =atomic_read(&cgroup->nr_pages); 
1995 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
1996 +		cgroup = &color_groups[i];
1997 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
1998 +			min_pages = atomic_read(&cgroup->nr_pages);
1999 +	}
2000 +	return min_pages;
2001 +}
2002 +
2003 +static void show_nr_pages(void)
2004 +{
2005 +	unsigned long i;
2006 +	struct color_group *cgroup;
2007 +	printk("show nr pages***************************************\n");
2008 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
2009 +		cgroup = &color_groups[i];
2010 +		printk("(%03ld) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
2011 +		if((i % 8) ==7) {
2012 +		    printk("\n");
2013 +		}
2014 +	}
2015 +}
2016 +
2017 +/*
2018 + * Add a page to the pool, on the list matching its color and bank.
2019 + */
2020 +void add_page_to_color_list(struct page *page)
2021 +{
2022 +	const unsigned long color = page_list_index(page);
2023 +	struct color_group *cgroup = &color_groups[color];
2024 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
2025 +	BUG_ON(page_count(page) > 1);
2026 +	spin_lock(&cgroup->lock);
2027 +	list_add_tail(&page->lru, &cgroup->list);
2028 +	atomic_inc(&cgroup->nr_pages);
2029 +	SetPageLRU(page);
2030 +	spin_unlock(&cgroup->lock);
2031 +}
2032 +
2033 +/*
2034 + * Replenish the page pool. 
2035 + * If a newly allocated page is one we want, it is pushed onto the correct page list;
2036 + * otherwise, it is freed.
2037 + * A user needs to invoke this function repeatedly until the page pool has enough pages.
2038 + */
2039 +static int do_add_pages(void)
2040 +{
2041 +	struct page *page, *page_tmp;
2042 +	LIST_HEAD(free_later);
2043 +	unsigned long color;
2044 +	int ret = 0;
2045 +	int i = 0;
2046 +	int free_counter = 0;
2047 +	unsigned long counter[128]= {0}; 
2048 +        
2049 +	// try a bounded number of allocations; repeat calls until the lists hold enough pages
2050 +	for (i=0; i< 1024*20;i++) {
2051 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
2052 +	
2053 +		if (unlikely(!page)) {
2054 +			printk(KERN_WARNING "Could not allocate pages.\n");
2055 +			ret = -ENOMEM;
2056 +			goto out;
2057 +		}
2058 +		color = page_list_index(page);
2059 +		counter[color]++;
2060 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=0) {
2061 +			add_page_to_color_list(page);
2062 +		} else {
2063 +			// Pages here will be freed later 
2064 +			list_add_tail(&page->lru, &free_later);
2065 +			free_counter++;
2066 +		}
2067 +	}
2068 +
2069 +	// Free the unwanted pages
2070 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
2071 +		list_del(&page->lru);
2072 +		__free_page(page);
2073 +	}
2074 +out:
2075 +        return ret;
2076 +}
2077 +
2078 +/*
2079 + * Provide pages for replacement according to cache color.
2080 + * This should be the only allocation implementation here;
2081 + * this function should not be called by others directly.
2082 + * 
2083 + */ 
2084 +static struct page *new_alloc_page_color( unsigned long color)
2085 +{
2086 +//	printk("allocate new page color = %d\n", color);	
2087 +	struct color_group *cgroup;
2088 +	struct page *rPage = NULL;
2089 +		
2090 +	if( (color <0) || (color)>(number_cachecolors*number_banks -1)) {
2091 +		TRACE_CUR("Wrong color %lu\n", color);	
2092 +		goto out;
2093 +	}
2094 +
2095 +		
2096 +	cgroup = &color_groups[color];
2097 +	spin_lock(&cgroup->lock);
2098 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
2099 +		TRACE_CUR("No free %lu colored pages.\n", color);
2100 +		goto out_unlock;
2101 +	}
2102 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
2103 +	BUG_ON(page_count(rPage) > 1);
2104 +	//get_page(rPage);
2105 +	list_del(&rPage->lru);
2106 +	atomic_dec(&cgroup->nr_pages);
2107 +	ClearPageLRU(rPage);
2108 +out_unlock:
2109 +	spin_unlock(&cgroup->lock);
2110 +out:
2111 +	return rPage;
2112 +}
2113 +
2114 +struct page* get_colored_page(unsigned long color)
2115 +{
2116 +	return new_alloc_page_color(color);
2117 +}
2118 +
2119 +/*
2120 + * Provide pages for replacement according to the allocation node:
2121 + * node = 0 for Level A tasks on CPU 0
2122 + * node = 1 for Level B tasks on CPU 0
2123 + * node = 2 for Level A tasks on CPU 1
2124 + * node = 3 for Level B tasks on CPU 1
2125 + * node = 4 for Level A tasks on CPU 2
2126 + * node = 5 for Level B tasks on CPU 2
2127 + * node = 6 for Level A tasks on CPU 3
2128 + * node = 7 for Level B tasks on CPU 3
2129 + * node = 8 for Level C tasks
2130 + */
2131 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
2132 +{
2133 +	struct page *rPage = NULL;
2134 +	int try = 0;
2135 +	unsigned int idx;
2136 +	
2137 +	if (node_index[node] == -1)
2138 +		idx = first_index(node);
2139 +	else
2140 +		idx = node_index[node];
2141 +	
2142 +	BUG_ON(idx<0 || idx>127);
2143 +	rPage =  new_alloc_page_color(idx);
2144 +	if (node_index[node] == last_index(node))
2145 +		node_index[node] = first_index(node);
2146 +	else
2147 +		node_index[node]++;
2148 +
2149 +	while (!rPage)  {
2150 +		try++;
2151 +		if (try>=256)
2152 +			break;
2153 +		idx = get_next_index(node, idx);
2154 +		printk(KERN_ALERT "try = %d out of page! requesting node  = %ld, idx = %d\n", try, node, idx);
2155 +		BUG_ON(idx<0 || idx>127);
2156 +		rPage = new_alloc_page_color(idx);
2157 +	}
2158 +	node_index[node] = idx;
2159 +	return rPage; 
2160 +}
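/*
 * Hypothetical usage sketch (not part of the patch): request a replacement
 * page for a Level-B task on CPU 2, i.e. node = cpu*2 + 1 = 5. With the
 * default tables, node 5 draws from bank_partition[5] = 0x40 (bank 6) and
 * set_partition[5] = 0x30 (colors 4 and 5), cycling through node_index[].
 */
static struct page *example_alloc_cpu2_levelB(void)
{
	/* the first and last arguments are unused by new_alloc_page() */
	return new_alloc_page(NULL, 5, NULL);
}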
2161 +
2162 +
2163 +/*
2164 + * Reclaim pages.
2165 + */
2166 +void reclaim_page(struct page *page)
2167 +{
2168 +	const unsigned long color = page_list_index(page);
2169 +	spin_lock(&reclaim_lock);
2170 +    	put_page(page);
2171 +	add_page_to_color_list(page);
2172 +
2173 +	spin_unlock(&reclaim_lock);
2174 +	printk("Reclaimed page(%ld) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
2175 +}
2176 +
2177 +
2178 +/*
2179 + * Initialize the numbers of banks and cache colors 
2180 + */ 
2181 +static void __init init_variables(void)
2182 +{
2183 +	number_banks = counting_one_set(BANK_MASK); 
2184 +	number_banks = two_exp(number_banks); 
2185 +
2186 +	number_cachecolors = counting_one_set(CACHE_MASK);
2187 +	number_cachecolors = two_exp(number_cachecolors);
2188 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
2189 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
2190 +	mutex_init(&void_lockdown_proc);
2191 +	spin_lock_init(&reclaim_lock);
2192 +
2193 +}
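/*
 * With the i.MX6 masks above, this derivation works out to:
 *   BANK_MASK  = 0x38000000 -> 3 bits set -> number_banks       = 2^3 = 8
 *   CACHE_MASK = 0x0000f000 -> 4 bits set -> number_cachecolors = 2^4 = 16
 *   NUM_PAGE_LIST = 8 * 16 = 128 page lists
 */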
2194 +
2195 +
2196 +/*
2197 + * Initialize the page pool 
2198 + */
2199 +static int __init init_color_groups(void)
2200 +{
2201 +	struct color_group *cgroup;
2202 +	unsigned long i;
2203 +	int err = 0;
2204 +
2205 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
2206 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
2207 +
2208 +	if (!color_groups) {
2209 +		printk(KERN_WARNING "Could not allocate color groups.\n");
2210 +		err = -ENOMEM;
2211 +	}else{
2212 +
2213 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
2214 +			cgroup = &color_groups[i];
2215 +			atomic_set(&cgroup->nr_pages, 0);
2216 +			INIT_LIST_HEAD(&cgroup->list);
2217 +			spin_lock_init(&cgroup->lock);
2218 +		}
2219 +	}
2220 +        return err;
2221 +}
2222 +
2223 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2224 +		size_t *lenp, loff_t *ppos)
2225 +{
2226 +	int ret = 0, i = 0;
2227 +	mutex_lock(&void_lockdown_proc);
2228 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2229 +	if (ret)
2230 +		goto out;
2231 +	if (write) {
2232 +            printk("New set Partition : \n");
2233 +	    for(i =0;i <9;i++)
2234 +            {
2235 +                set_index[i] = 0;
2236 +                printk("set[%d] = %x \n", i, set_partition[i]);
2237 +            }
2238 +	}
2239 +out:
2240 +	mutex_unlock(&void_lockdown_proc);
2241 +	return ret;
2242 +}
2243 +
2244 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2245 +		size_t *lenp, loff_t *ppos)
2246 +{
2247 +	int ret = 0, i = 0;
2248 +	mutex_lock(&void_lockdown_proc);
2249 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2250 +	if (ret)
2251 +		goto out;
2252 +	if (write) {
2253 +	    for(i =0;i <9;i++)
2254 +            {
2255 +                bank_index[i] = 0;
2256 +            }
2257 +	}
2258 +out:
2259 +	mutex_unlock(&void_lockdown_proc);
2260 +	return ret;
2261 +}
2262 +
2263 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
2264 +		size_t *lenp, loff_t *ppos)
2265 +{
2266 +	int ret = 0;
2267 +	mutex_lock(&void_lockdown_proc);
2268 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2269 +	if (ret)
2270 +		goto out;
2271 +	if (write) {
2272 +            show_nr_pages();
2273 +	}
2274 +out:
2275 +	mutex_unlock(&void_lockdown_proc);
2276 +	return ret;
2277 +}
2278 +
2279 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
2280 +		size_t *lenp, loff_t *ppos)
2281 +{
2282 +	int ret = 0;
2283 +	mutex_lock(&void_lockdown_proc);
2284 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2285 +	if (ret)
2286 +		goto out;
2287 +	if (write) {
2288 +            do_add_pages();
2289 +			show_nr_pages();
2290 +	}
2291 +out:
2292 +	mutex_unlock(&void_lockdown_proc);
2293 +	return ret;
2294 +}
2295 +
2296 +/*
2297 +static struct ctl_table cache_table[] =
2298 +{
2299 +        
2300 +	{
2301 +		.procname	= "C0_LA_set",
2302 +		.mode		= 0666,
2303 +		.proc_handler	= set_partition_handler,
2304 +		.data		= &set_partition[0],
2305 +		.maxlen		= sizeof(set_partition[0]),
2306 +		.extra1		= &set_partition_min,
2307 +		.extra2		= &set_partition_max,
2308 +	},	
2309 +	{
2310 +		.procname	= "C0_LB_set",
2311 +		.mode		= 0666,
2312 +		.proc_handler	= set_partition_handler,
2313 +		.data		= &set_partition[1],
2314 +		.maxlen		= sizeof(set_partition[1]),
2315 +		.extra1		= &set_partition_min,
2316 +		.extra2		= &set_partition_max,
2317 +	},	
2318 +	{
2319 +		.procname	= "C1_LA_set",
2320 +		.mode		= 0666,
2321 +		.proc_handler	= set_partition_handler,
2322 +		.data		= &set_partition[2],
2323 +		.maxlen		= sizeof(set_partition[2]),
2324 +		.extra1		= &set_partition_min,
2325 +		.extra2		= &set_partition_max,
2326 +	},
2327 +	{
2328 +		.procname	= "C1_LB_set",
2329 +		.mode		= 0666,
2330 +		.proc_handler	= set_partition_handler,
2331 +		.data		= &set_partition[3],
2332 +		.maxlen		= sizeof(set_partition[3]),
2333 +		.extra1		= &set_partition_min,
2334 +		.extra2		= &set_partition_max,
2335 +	},
2336 +	{
2337 +		.procname	= "C2_LA_set",
2338 +		.mode		= 0666,
2339 +		.proc_handler	= set_partition_handler,
2340 +		.data		= &set_partition[4],
2341 +		.maxlen		= sizeof(set_partition[4]),
2342 +		.extra1		= &set_partition_min,
2343 +		.extra2		= &set_partition_max,
2344 +	},
2345 +	{
2346 +		.procname	= "C2_LB_set",
2347 +		.mode		= 0666,
2348 +		.proc_handler	= set_partition_handler,
2349 +		.data		= &set_partition[5],
2350 +		.maxlen		= sizeof(set_partition[5]),
2351 +		.extra1		= &set_partition_min,
2352 +		.extra2		= &set_partition_max,
2353 +	},
2354 +	{
2355 +		.procname	= "C3_LA_set",
2356 +		.mode		= 0666,
2357 +		.proc_handler	= set_partition_handler,
2358 +		.data		= &set_partition[6],
2359 +		.maxlen		= sizeof(set_partition[6]),
2360 +		.extra1		= &set_partition_min,
2361 +		.extra2		= &set_partition_max,
2362 +	},
2363 +	{
2364 +		.procname	= "C3_LB_set",
2365 +		.mode		= 0666,
2366 +		.proc_handler	= set_partition_handler,
2367 +		.data		= &set_partition[7],
2368 +		.maxlen		= sizeof(set_partition[7]),
2369 +		.extra1		= &set_partition_min,
2370 +		.extra2		= &set_partition_max,
2371 +	},	
2372 +	{
2373 +		.procname	= "Call_LC_set",
2374 +		.mode		= 0666,
2375 +		.proc_handler	= set_partition_handler,
2376 +		.data		= &set_partition[8],
2377 +		.maxlen		= sizeof(set_partition[8]),
2378 +		.extra1		= &set_partition_min,
2379 +		.extra2		= &set_partition_max,
2380 +	},	
2381 +	{
2382 +		.procname	= "C0_LA_bank",
2383 +		.mode		= 0666,
2384 +		.proc_handler	= bank_partition_handler,
2385 +		.data		= &bank_partition[0],
2386 +		.maxlen		= sizeof(set_partition[0]),
2387 +		.extra1		= &bank_partition_min,
2388 +		.extra2		= &bank_partition_max,
2389 +	},
2390 +	{
2391 +		.procname	= "C0_LB_bank",
2392 +		.mode		= 0666,
2393 +		.proc_handler	= bank_partition_handler,
2394 +		.data		= &bank_partition[1],
2395 +		.maxlen		= sizeof(set_partition[1]),
2396 +		.extra1		= &bank_partition_min,
2397 +		.extra2		= &bank_partition_max,
2398 +	},		
2399 +	{
2400 +		.procname	= "C1_LA_bank",
2401 +		.mode		= 0666,
2402 +		.proc_handler	= bank_partition_handler,
2403 +		.data		= &bank_partition[2],
2404 +		.maxlen		= sizeof(set_partition[2]),
2405 +		.extra1		= &bank_partition_min,
2406 +		.extra2		= &bank_partition_max,
2407 +	},
2408 +	{
2409 +		.procname	= "C1_LB_bank",
2410 +		.mode		= 0666,
2411 +		.proc_handler	= bank_partition_handler,
2412 +		.data		= &bank_partition[3],
2413 +		.maxlen		= sizeof(set_partition[3]),
2414 +		.extra1		= &bank_partition_min,
2415 +		.extra2		= &bank_partition_max,
2416 +	},
2417 +	{
2418 +		.procname	= "C2_LA_bank",
2419 +		.mode		= 0666,
2420 +		.proc_handler	= bank_partition_handler,
2421 +		.data		= &bank_partition[4],
2422 +		.maxlen		= sizeof(set_partition[4]),
2423 +		.extra1		= &bank_partition_min,
2424 +		.extra2		= &bank_partition_max,
2425 +	},	
2426 +	{
2427 +		.procname	= "C2_LB_bank",
2428 +		.mode		= 0666,
2429 +		.proc_handler	= bank_partition_handler,
2430 +		.data		= &bank_partition[5],
2431 +		.maxlen		= sizeof(set_partition[5]),
2432 +		.extra1		= &bank_partition_min,
2433 +		.extra2		= &bank_partition_max,
2434 +	},		
2435 +	{
2436 +		.procname	= "C3_LA_bank",
2437 +		.mode		= 0666,
2438 +		.proc_handler	= bank_partition_handler,
2439 +		.data		= &bank_partition[6],
2440 +		.maxlen		= sizeof(set_partition[6]),
2441 +		.extra1		= &bank_partition_min,
2442 +		.extra2		= &bank_partition_max,
2443 +	},	
2444 +	{
2445 +		.procname	= "C3_LB_bank",
2446 +		.mode		= 0666,
2447 +		.proc_handler	= bank_partition_handler,
2448 +		.data		= &bank_partition[7],
2449 +		.maxlen		= sizeof(set_partition[7]),
2450 +		.extra1		= &bank_partition_min,
2451 +		.extra2		= &bank_partition_max,
2452 +	},	
2453 +	{
2454 +		.procname	= "Call_LC_bank",
2455 +		.mode		= 0666,
2456 +		.proc_handler	= bank_partition_handler,
2457 +		.data		= &bank_partition[8],
2458 +		.maxlen		= sizeof(set_partition[8]),
2459 +		.extra1		= &bank_partition_min,
2460 +		.extra2		= &bank_partition_max,
2461 +	},	
2462 +	{
2463 +		.procname	= "show_page_pool",
2464 +		.mode		= 0666,
2465 +		.proc_handler	= show_page_pool_handler,
2466 +		.data		= &show_page_pool,
2467 +		.maxlen		= sizeof(show_page_pool),
2468 +	},		{
2469 +		.procname	= "refill_page_pool",
2470 +		.mode		= 0666,
2471 +		.proc_handler	= refill_page_pool_handler,
2472 +		.data		= &refill_page_pool,
2473 +		.maxlen		= sizeof(refill_page_pool),
2474 +	},	
2475 +	{ }
2476 +};
2477 +*/
2478 +/*
2479 +static struct ctl_table litmus_dir_table[] = {
2480 +	{
2481 +		.procname	= "litmus",
2482 + 		.mode		= 0555,
2483 +		.child		= cache_table,
2484 +	},
2485 +	{ }
2486 +};
2487 +*/
2488 +
2489 +//static struct ctl_table_header *litmus_sysctls;
2490 +
2491 +
2492 +/*
2493 + * Initialize this proc entry.
2494 + */
2495 +static int __init litmus_color_init(void)
2496 +{
2497 +	int err=0;
2498 +        printk("Init bank_proc.c\n");
2499 +/*
2500 +	init_variables();
2501 +
2502 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
2503 +
2504 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
2505 +	if (!litmus_sysctls) {
2506 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
2507 +		err = -EFAULT;
2508 +		goto out;
2509 +	}
2510 +
2511 +	init_color_groups();			
2512 +	do_add_pages();
2513 +*/
2514 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
2515 +out:
2516 +	return err;
2517 +}
2518 +
2519 +module_init(litmus_color_init);
2520 +
2521 diff --git litmus/budget.c litmus/budget.c
2522 index 47bf78a..d67f4b3 100644
2523 --- litmus/budget.c
2524 +++ litmus/budget.c
2525 @@ -1,9 +1,11 @@
2526  #include <linux/sched.h>
2527  #include <linux/percpu.h>
2528  #include <linux/hrtimer.h>
2529 +#include <linux/uaccess.h>
2530  
2531  #include <litmus/litmus.h>
2532  #include <litmus/preempt.h>
2533 +#include <litmus/sched_plugin.h>
2534  
2535  #include <litmus/budget.h>
2536  
2537 @@ -113,4 +115,54 @@ static int __init init_budget_enforcement(void)
2538  	return 0;
2539  }
2540  
2541 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining)
2542 +{
2543 +	struct task_struct *t = current;
2544 +	unsigned long flags;
2545 +	s64 delta;
2546 +
2547 +	local_irq_save(flags);
2548 +
2549 +	delta = sched_clock_cpu(smp_processor_id()) - t->se.exec_start;
2550 +	if (delta < 0)
2551 +		delta = 0;
2552 +
2553 +	TRACE_CUR("current_budget: sc:%llu start:%llu lt_t:%llu delta:%lld exec-time:%llu rem:%llu\n",
2554 +		sched_clock_cpu(smp_processor_id()), t->se.exec_start,
2555 +		litmus_clock(), delta,
2556 +		tsk_rt(t)->job_params.exec_time,
2557 +		budget_remaining(t));
2558 +
2559 +	if (used_so_far)
2560 +		*used_so_far = tsk_rt(t)->job_params.exec_time + delta;
2561 +
2562 +	if (remaining) {
2563 +		*remaining = budget_remaining(t);
2564 +		if (*remaining > delta)
2565 +			*remaining -= delta;
2566 +		else
2567 +			*remaining = 0;
2568 +	}
2569 +
2570 +	local_irq_restore(flags);
2571 +}
2572 +
2573 +asmlinkage long sys_get_current_budget(
2574 +	lt_t __user * _expended,
2575 +	lt_t __user *_remaining)
2576 +{
2577 +	lt_t expended = 0, remaining = 0;
2578 +
2579 +	if (is_realtime(current))
2580 +		litmus->current_budget(&expended, &remaining);
2581 +
2582 +	if (_expended && put_user(expended, _expended))
2583 +		return -EFAULT;
2584 +
2585 +	if (_remaining && put_user(remaining, _remaining))
2586 +		return -EFAULT;
2587 +
2588 +	return 0;
2589 +}
2590 +
2591  module_init(init_budget_enforcement);
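
From user space, the new syscall can be reached with a raw syscall(2) wrapper. A minimal sketch, assuming __NR_get_current_budget is defined to whatever number the architecture's syscall table assigns to sys_get_current_budget (both output pointers are optional, and non-real-time tasks simply read back zeros):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_get_current_budget
#define __NR_get_current_budget 400	/* assumption: check the syscall table */
#endif

int main(void)
{
	uint64_t used = 0, rem = 0;	/* lt_t is a 64-bit time value in ns */

	if (syscall(__NR_get_current_budget, &used, &rem) == 0)
		printf("used: %llu ns, remaining: %llu ns\n",
		       (unsigned long long)used, (unsigned long long)rem);
	return 0;
}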
2592 diff --git litmus/cache_proc.c litmus/cache_proc.c
2593 new file mode 100644
2594 index 0000000..51d0c3d
2595 --- /dev/null
2596 +++ litmus/cache_proc.c
2597 @@ -0,0 +1,1426 @@
2598 +#include <asm/uaccess.h>
2599 +#include <linux/uaccess.h>
2600 +#include <linux/init.h>
2601 +#include <linux/types.h>
2602 +#include <linux/kernel.h>
2603 +#include <linux/module.h>
2604 +#include <linux/sysctl.h>
2605 +#include <linux/slab.h>
2606 +#include <linux/io.h>
2607 +#include <linux/mutex.h>
2608 +#include <linux/time.h>
2609 +#include <linux/random.h>
2610 +#include <linux/sched.h>
2611 +
2612 +#include <litmus/rt_param.h>
2613 +#include <litmus/litmus.h>
2614 +#include <litmus/litmus_proc.h>
2615 +#include <litmus/sched_trace.h>
2616 +#include <litmus/cache_proc.h>
2617 +#include <litmus/mc2_common.h>
2618 +
2619 +#include <asm/hardware/cache-l2x0.h>
2620 +#include <asm/cacheflush.h>
2621 +
2622 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
2623 +#define LOCK_ALL        (~UNLOCK_ALL)
2624 +#define MAX_NR_WAYS	16
2625 +#define MAX_NR_COLORS	16
2626 +#define CACHELINE_SIZE 32
2627 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
2628 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
2629 +
2630 +typedef struct cacheline
2631 +{
2632 +        int line[INTS_IN_CACHELINE];
2633 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
2634 +
2635 +void mem_lock(u32 lock_val, int cpu);
2636 +
2637 +/*
2638 + * unlocked_way[i] : allocation can occur in way i
2639 + *
2640 + * 0 = allocation can occur in the corresponding way
2641 + * 1 = allocation cannot occur in the corresponding way
2642 + */
2643 +u32 unlocked_way[MAX_NR_WAYS]  = {
2644 +	0xFFFFFFFE, /* way 0 unlocked */
2645 +	0xFFFFFFFD,
2646 +	0xFFFFFFFB,
2647 +	0xFFFFFFF7,
2648 +	0xFFFFFFEF, /* way 4 unlocked */
2649 +	0xFFFFFFDF,
2650 +	0xFFFFFFBF,
2651 +	0xFFFFFF7F,
2652 +	0xFFFFFEFF, /* way 8 unlocked */
2653 +	0xFFFFFDFF,
2654 +	0xFFFFFBFF,
2655 +	0xFFFFF7FF,
2656 +	0xFFFFEFFF, /* way 12 unlocked */
2657 +	0xFFFFDFFF,
2658 +	0xFFFFBFFF,
2659 +	0xFFFF7FFF,
2660 +};
2661 +
2662 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
2663 +	0x0000FFFF, /* all ways are locked. usable = 0*/
2664 +	0x0000FFFE, /* way ~0 unlocked. usable = 1 */
2665 +	0x0000FFFC,
2666 +	0x0000FFF8,
2667 +	0x0000FFF0,
2668 +	0x0000FFE0,
2669 +	0x0000FFC0,
2670 +	0x0000FF80,
2671 +	0x0000FF00,
2672 +	0x0000FE00,
2673 +	0x0000FC00,
2674 +	0x0000F800,
2675 +	0x0000F000,
2676 +	0x0000E000,
2677 +	0x0000C000,
2678 +	0x00008000,
2679 +	0x00000000, /* way ~15 unlocked. usable = 16 */
2680 +};
2681 +
2682 +u32 way_partition[4] = {
2683 +	0xfffffff0, /* cpu0 */
2684 +	0xffffff0f, /* cpu1 */
2685 +	0xfffff0ff, /* cpu2 */
2686 +	0xffff0fff, /* cpu3 */
2687 +};
2688 +
2689 +u32 way_partitions[9] = {
2690 +	0xffff00ff, /* cpu0 A */
2691 +	0xffff00ff, /* cpu0 B */
2692 +	0xffff00ff, /* cpu1 A */
2693 +	0xffff00ff, /* cpu1 B */
2694 +	0xffff00ff, /* cpu2 A */
2695 +	0xffff00ff, /* cpu2 B */
2696 +	0xffff00ff, /* cpu3 A */
2697 +	0xffff00ff, /* cpu3 B */
2698 +	0xffffff00, /* lv C */
2699 +};
2700 +
2701 +u32 prev_lockdown_d_reg[5] = {
2702 +	0x0000FF00,
2703 +	0x0000FF00,
2704 +	0x0000FF00,
2705 +	0x0000FF00,
2706 +	0x000000FF, /* share with level-C */
2707 +};
2708 +
2709 +u32 prev_lockdown_i_reg[5] = {
2710 +	0x0000FF00,
2711 +	0x0000FF00,
2712 +	0x0000FF00,
2713 +	0x0000FF00,
2714 +	0x000000FF, /* share with level-C */
2715 +};
2716 +
2717 +u32 prev_lbm_i_reg[8] = {
2718 +	0x00000000,
2719 +	0x00000000,
2720 +	0x00000000,
2721 +	0x00000000,
2722 +	0x00000000,
2723 +	0x00000000,
2724 +	0x00000000,
2725 +	0x00000000,
2726 +};
2727 +
2728 +u32 prev_lbm_d_reg[8] = {
2729 +	0x00000000,
2730 +	0x00000000,
2731 +	0x00000000,
2732 +	0x00000000,
2733 +	0x00000000,
2734 +	0x00000000,
2735 +	0x00000000,
2736 +	0x00000000,
2737 +};
2738 +
2739 +static void __iomem *cache_base;
2740 +static void __iomem *lockreg_d;
2741 +static void __iomem *lockreg_i;
2742 +
2743 +static u32 cache_id;
2744 +
2745 +struct mutex actlr_mutex;
2746 +struct mutex l2x0_prefetch_mutex;
2747 +struct mutex lockdown_proc;
2748 +static u32 way_partition_min;
2749 +static u32 way_partition_max;
2750 +
2751 +static int zero = 0;
2752 +static int one = 1;
2753 +
2754 +static int l1_prefetch_proc;
2755 +static int l2_prefetch_hint_proc;
2756 +static int l2_double_linefill_proc;
2757 +static int l2_data_prefetch_proc;
2758 +static int os_isolation;
2759 +static int use_part;
2760 +
2761 +static u32 debug_test_val;
2762 +struct mutex debug_mutex;
2763 +
2764 +u32 lockdown_reg[9] = {
2765 +	0x00000000,
2766 +	0x00000000,
2767 +	0x00000000,
2768 +	0x00000000,
2769 +	0x00000000,
2770 +	0x00000000,
2771 +	0x00000000,
2772 +	0x00000000,
2773 +};
2774 +	
2775 +
2776 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
2777 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
2778 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2779 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
2780 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
2781 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2782 +
2783 +int lock_all;
2784 +int nr_lockregs;
2785 +static raw_spinlock_t cache_lock;
2786 +static raw_spinlock_t prefetch_lock;
2787 +static void ***flusher_pages = NULL;
2788 +
2789 +extern void l2c310_flush_all(void);
2790 +
2791 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
2792 +{
2793 +	/* wait for cache operation by line or way to complete */
2794 +	while (readl_relaxed(reg) & mask)
2795 +		cpu_relax();
2796 +}
2797 +
2798 +#ifdef CONFIG_CACHE_L2X0 
2799 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
2800 +{
2801 +	/* cache operations by line are atomic on PL310 */
2802 +}
2803 +#else
2804 +#define cache_wait	cache_wait_way
2805 +#endif
2806 +
2807 +static inline void cache_sync(void)
2808 +{
2809 +	void __iomem *base = cache_base;
2810 +
2811 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
2812 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
2813 +}
2814 +
2815 +static void print_lockdown_registers(int cpu)
2816 +{
2817 +	int i;
2818 +	//for (i = 0; i < nr_lockregs; i++) {
2819 +	for (i = 0; i < 4; i++) {
2820 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
2821 +				i, readl_relaxed(ld_d_reg(i)));
2822 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
2823 +				i, readl_relaxed(ld_i_reg(i)));
2824 +	}
2825 +}
2826 +
2827 +static void test_lockdown(void *ignore)
2828 +{
2829 +	int i, cpu;
2830 +
2831 +	cpu = smp_processor_id();
2832 +	printk("Start lockdown test on CPU %d.\n", cpu);
2833 +
2834 +	for (i = 0; i < nr_lockregs; i++) {
2835 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
2836 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
2837 +	}
2838 +
2839 +	printk("Lockdown initial state:\n");
2840 +	print_lockdown_registers(cpu);
2841 +	printk("---\n");
2842 +
2843 +	for (i = 0; i < nr_lockregs; i++) {
2844 +		writel_relaxed(1, ld_d_reg(i));
2845 +		writel_relaxed(2, ld_i_reg(i));
2846 +	}
2847 +	printk("Lockdown all data=1 instr=2:\n");
2848 +	print_lockdown_registers(cpu);
2849 +	printk("---\n");
2850 +
2851 +	for (i = 0; i < nr_lockregs; i++) {
2852 +		writel_relaxed((1 << i), ld_d_reg(i));
2853 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
2854 +	}
2855 +	printk("Lockdown varies:\n");
2856 +	print_lockdown_registers(cpu);
2857 +	printk("---\n");
2858 +
2859 +	for (i = 0; i < nr_lockregs; i++) {
2860 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
2861 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
2862 +	}
2863 +	printk("Lockdown all zero:\n");
2864 +	print_lockdown_registers(cpu);
2865 +
2866 +	printk("End lockdown test.\n");
2867 +}
2868 +
2869 +void litmus_setup_lockdown(void __iomem *base, u32 id)
2870 +{
2871 +	cache_base = base;
2872 +	cache_id = id;
2873 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
2874 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
2875 +    
2876 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
2877 +		nr_lockregs = 8;
2878 +	} else {
2879 +		printk("Unknown cache ID!\n");
2880 +		nr_lockregs = 1;
2881 +	}
2882 +	
2883 +	mutex_init(&actlr_mutex);
2884 +	mutex_init(&l2x0_prefetch_mutex);
2885 +	mutex_init(&lockdown_proc);
2886 +	mutex_init(&debug_mutex);
2887 +	raw_spin_lock_init(&cache_lock);
2888 +	raw_spin_lock_init(&prefetch_lock);
2889 +	
2890 +	test_lockdown(NULL);
2891 +}
2892 +
2893 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2894 +		size_t *lenp, loff_t *ppos)
2895 +{
2896 +	int ret = 0, i;
2897 +	unsigned long flags;
2898 +	
2899 +	mutex_lock(&lockdown_proc);
2900 +	
2901 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2902 +	if (ret)
2903 +		goto out;
2904 +	
2905 +	if (write) {
2906 +		printk("Way-partition settings:\n");
2907 +		for (i = 0; i < 9; i++) {
2908 +			printk("0x%08X\n", way_partitions[i]);
2909 +		}
2910 +		for (i = 0; i < 4; i++) {
2911 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2912 +				       i * L2X0_LOCKDOWN_STRIDE);
2913 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2914 +				       i * L2X0_LOCKDOWN_STRIDE);
2915 +		}
2916 +	}
2917 +	
2918 +	local_irq_save(flags);
2919 +	print_lockdown_registers(smp_processor_id());
2920 +	l2c310_flush_all();
2921 +	local_irq_restore(flags);
2922 +out:
2923 +	mutex_unlock(&lockdown_proc);
2924 +	return ret;
2925 +}
2926 +
2927 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
2928 +		size_t *lenp, loff_t *ppos)
2929 +{
2930 +	int ret = 0, i;
2931 +	unsigned long flags;
2932 +	
2933 +	mutex_lock(&lockdown_proc);
2934 +	
2935 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2936 +	if (ret)
2937 +		goto out;
2938 +	
2939 +	if (write && lock_all == 1) {
2940 +		for (i = 0; i < nr_lockregs; i++) {
2941 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2942 +				       i * L2X0_LOCKDOWN_STRIDE);
2943 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2944 +				       i * L2X0_LOCKDOWN_STRIDE);
2945 +		}
2946 +/*		
2947 +		for (i = 0; i < nr_lockregs;  i++) {
2948 +			barrier();
2949 +			mem_lock(LOCK_ALL, i);
2950 +			barrier();
2951 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
2952 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
2953 +		}
2954 +*/		
2955 +	}
2956 +	if (write && lock_all == 0) {
2957 +		for (i = 0; i < nr_lockregs; i++) {
2958 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2959 +				       i * L2X0_LOCKDOWN_STRIDE);
2960 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2961 +				       i * L2X0_LOCKDOWN_STRIDE);
2962 +		}
2963 +
2964 +	}
2965 +
2966 +	local_irq_save(flags);
2967 +	print_lockdown_registers(smp_processor_id());
2968 +	l2c310_flush_all();
2969 +	local_irq_restore(flags);
2970 +out:
2971 +	mutex_unlock(&lockdown_proc);
2972 +	return ret;
2973 +}
2974 +
2975 +void cache_lockdown(u32 lock_val, int cpu)
2976 +{
2977 +	__asm__ __volatile__ (
2978 +"	str	%[lockval], [%[dcachereg]]\n"
2979 +"	str	%[lockval], [%[icachereg]]\n"
2980 +	: 
2981 +	: [dcachereg] "r" (ld_d_reg(cpu)),
2982 +	  [icachereg] "r" (ld_i_reg(cpu)),
2983 +	  [lockval] "r" (lock_val)
2984 +	: "cc");
2985 +}
2986 +
2987 +void do_partition(enum crit_level lv, int cpu)
2988 +{
2989 +	u32 regs;
2990 +	unsigned long flags;
2991 +	
2992 +	if (lock_all || !use_part)
2993 +		return;
2994 +	raw_spin_lock_irqsave(&cache_lock, flags);
2995 +	switch(lv) {
2996 +		case CRIT_LEVEL_A:
2997 +			regs = ~way_partitions[cpu*2];
2998 +			regs &= 0x0000ffff;
2999 +			break;
3000 +		case CRIT_LEVEL_B:
3001 +			regs = ~way_partitions[cpu*2+1];
3002 +			regs &= 0x0000ffff;
3003 +			break;
3004 +		case CRIT_LEVEL_C:
3005 +		case NUM_CRIT_LEVELS:
3006 +			regs = ~way_partitions[8];
3007 +			regs &= 0x0000ffff;
3008 +			break;
3009 +		default:
3010 +			BUG();
3011 +
3012 +	}
3013 +	barrier();
3014 +
3015 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
3016 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
3017 +	barrier();
3018 +
3019 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
3020 +}
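/*
 * Worked example of the lockdown value computed above, using the default
 * way_partitions[] table (a set bit in a PL310 lockdown register means
 * "no allocation in that way"; cf. unlocked_way[]):
 *   Level A/B: ~0xffff00ff & 0x0000ffff = 0xff00 -> ways 8..15 locked,
 *              so A/B allocations fall in ways 0..7;
 *   Level C:   ~0xffffff00 & 0x0000ffff = 0x00ff -> ways 0..7 locked,
 *              so Level-C allocations fall in ways 8..15.
 */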
3021 +
3022 +void lock_cache(int cpu, u32 val)
3023 +{
3024 +	unsigned long flags;
3025 +	
3026 +	local_irq_save(flags);
3027 +	if (val != 0xffffffff) {
3028 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
3029 +					   cpu * L2X0_LOCKDOWN_STRIDE);
3030 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
3031 +					   cpu * L2X0_LOCKDOWN_STRIDE);
3032 +	}
3033 +	else {
3034 +		int i;
3035 +		for (i = 0; i < 4; i++)
3036 +			do_partition(CRIT_LEVEL_A, i);
3037 +	}
3038 +	local_irq_restore(flags);
3039 +}
3040 +
3041 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
3042 +		size_t *lenp, loff_t *ppos)
3043 +{
3044 +	int ret = 0;
3045 +	
3046 +	mutex_lock(&lockdown_proc);
3047 +
3048 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3049 +	if (ret)
3050 +		goto out;
3051 +	
3052 +
3053 +	printk("USE_PART HANDLER = %d\n", use_part);
3054 +
3055 +out:
3056 +	mutex_unlock(&lockdown_proc);
3057 +	return ret;
3058 +}
3059 +
3060 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
3061 +		size_t *lenp, loff_t *ppos)
3062 +{
3063 +	int ret = 0;
3064 +	
3065 +	mutex_lock(&lockdown_proc);
3066 +	
3067 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3068 +	if (ret)
3069 +		goto out;
3070 +	
3071 +
3072 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
3073 +
3074 +out:
3075 +	mutex_unlock(&lockdown_proc);
3076 +	return ret;
3077 +}
3078 +
3079 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
3080 +		size_t *lenp, loff_t *ppos)
3081 +{
3082 +	int ret = 0, i;
3083 +	
3084 +	mutex_lock(&lockdown_proc);
3085 +	
3086 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3087 +	if (ret)
3088 +		goto out;
3089 +	
3090 +	if (write) {
3091 +		for (i = 0; i < nr_lockregs; i++) {
3092 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
3093 +				       i * L2X0_LOCKDOWN_STRIDE);
3094 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
3095 +				       i * L2X0_LOCKDOWN_STRIDE);
3096 +		}
3097 +	}
3098 +
3099 +out:
3100 +	mutex_unlock(&lockdown_proc);
3101 +	return ret;
3102 +}
3103 +
3104 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
3105 +		size_t *lenp, loff_t *ppos)
3106 +{
3107 +	int ret = 0, i;
3108 +	
3109