Attachment 'litmus-rt-rtns17.patch'

   1 diff --git arch/arm/boot/compressed/Makefile arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- arch/arm/boot/compressed/Makefile
   4 +++ arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git arch/arm/include/asm/unistd.h arch/arm/include/asm/unistd.h
  14 index 0c462a9..e6cb574 100644
  15 --- arch/arm/include/asm/unistd.h
  16 +++ arch/arm/include/asm/unistd.h
  17 @@ -19,7 +19,8 @@
  18   * This may need to be greater than __NR_last_syscall+1 in order to
  19   * account for the padding in the syscall table
  20   */
  21 -#define __NR_syscalls  (388 + NR_litmus_syscalls)
  22 +#define __NR_syscalls  (392 + NR_litmus_syscalls)
  23 +
  24  
  25  /*
  26   * *NOTE*: This is a ghost syscall private to the kernel.  Only the
  27 diff --git arch/arm/kernel/calls.S arch/arm/kernel/calls.S
  28 index f4738a8..1bfbe24 100644
  29 --- arch/arm/kernel/calls.S
  30 +++ arch/arm/kernel/calls.S
  31 @@ -409,6 +409,16 @@
  32          CALL(sys_wait_for_ts_release)
  33  		CALL(sys_release_ts)
  34  		CALL(sys_null_call)
  35 +/* 400 */	CALL(sys_get_current_budget)
  36 +		CALL(sys_reservation_create)
  37 +		CALL(sys_reservation_destroy)
  38 +		CALL(sys_set_mc2_task_param)
  39 +		CALL(sys_set_page_color)
  40 +/* 405 */	CALL(sys_test_call)
  41 +		CALL(sys_run_test)
  42 +		CALL(sys_lock_buffer)
  43 +		CALL(sys_request_mode)
  44 +		CALL(sys_enact_mode)
  45  
  46  #ifndef syscalls_counted
  47  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
  48 diff --git arch/arm/mm/cache-l2x0.c arch/arm/mm/cache-l2x0.c
  49 index e309c8f..71c969a 100644
  50 --- arch/arm/mm/cache-l2x0.c
  51 +++ arch/arm/mm/cache-l2x0.c
  52 @@ -33,6 +33,8 @@
  53  #include "cache-tauros3.h"
  54  #include "cache-aurora-l2.h"
  55  
  56 +#include <litmus/cache_proc.h>
  57 +
  58  struct l2c_init_data {
  59  	const char *type;
  60  	unsigned way_size_0;
  61 @@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  62  
  63  	if (n) {
  64  		unsigned i;
  65 -
  66  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  67  		for (i = 0; i < n; i++)
  68  			pr_cont(" %s", errata[i]);
  69 @@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
  70  	},
  71  };
  72  
  73 +void l2c310_flush_all(void)
  74 +{
  75 +	l2c210_flush_all();
  76 +}
  77 +
  78  static int __init __l2c_init(const struct l2c_init_data *data,
  79  			     u32 aux_val, u32 aux_mask, u32 cache_id)
  80  {
  81 @@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
  82  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
  83  		data->type, cache_id, aux);
  84  
  85 +	litmus_setup_lockdown(l2x0_base, cache_id);
  86 +	
  87  	return 0;
  88  }
  89  
  90 diff --git arch/x86/syscalls/syscall_32.tbl arch/x86/syscalls/syscall_32.tbl
  91 index 34680a5..b303a9b 100644
  92 --- arch/x86/syscalls/syscall_32.tbl
  93 +++ arch/x86/syscalls/syscall_32.tbl
  94 @@ -377,3 +377,11 @@
  95  368	i386	wait_for_ts_release	sys_wait_for_ts_release
  96  369	i386	release_ts		sys_release_ts
  97  370	i386	null_call		sys_null_call
  98 +371	i386	get_current_budget	sys_get_current_budget
  99 +372	i386	reservation_create	sys_reservation_create
 100 +373	i386	reservation_destroy	sys_reservation_destroy
 101 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
 102 +375	i386	set_page_color		sys_set_page_color
 103 +376	i386	test_call		sys_test_call
 104 +377	i386	run_test		sys_run_test
 105 +378	i386	lock_buffer		sys_lock_buffer
 106 diff --git arch/x86/syscalls/syscall_64.tbl arch/x86/syscalls/syscall_64.tbl
 107 index cbd1b6b..5f24a80 100644
 108 --- arch/x86/syscalls/syscall_64.tbl
 109 +++ arch/x86/syscalls/syscall_64.tbl
 110 @@ -342,6 +342,14 @@
 111  360	common	wait_for_ts_release	sys_wait_for_ts_release
 112  361	common	release_ts		sys_release_ts
 113  362	common	null_call		sys_null_call
 114 +363	common	get_current_budget	sys_get_current_budget
 115 +364	common	reservation_create	sys_reservation_create
 116 +365	common	reservation_destroy	sys_reservation_destroy
 117 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 118 +367	common	set_page_color		sys_set_page_color
 119 +368	common	test_call		sys_test_call
 120 +369	common	run_test		sys_run_test
 121 +370	common	lock_buffer		sys_lock_buffer
 122  
 123  #
 124  # x32-specific system call numbers start at 512 to avoid cache impact
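[Editorial sketch -- not part of the patch. With the numbers assigned above, the
new syscalls can be invoked from userspace via syscall(2); liblitmus would
normally provide proper wrappers. The two-pointer argument convention for
get_current_budget is an assumption based on litmus_current_budget() declared in
include/litmus/budget.h below.]

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        uint64_t used = 0, remaining = 0;
        /* 363 == get_current_budget in arch/x86/syscalls/syscall_64.tbl (assumed ABI) */
        long ret = syscall(363, &used, &remaining);
        if (ret == 0)
            printf("used: %llu ns, remaining: %llu ns\n",
                   (unsigned long long)used, (unsigned long long)remaining);
        return 0;
    }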
 125 diff --git include/litmus/budget.h include/litmus/budget.h
 126 index bd2d5c9..60eb814 100644
 127 --- include/litmus/budget.h
 128 +++ include/litmus/budget.h
 129 @@ -33,4 +33,6 @@ static inline int requeue_preempted_job(struct task_struct* t)
 130  		(!budget_exhausted(t) || !budget_enforced(t));
 131  }
 132  
 133 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining);
 134 +
 135  #endif
 136 diff --git include/litmus/cache_proc.h include/litmus/cache_proc.h
 137 new file mode 100644
 138 index 0000000..e9440de
 139 --- /dev/null
 140 +++ include/litmus/cache_proc.h
 141 @@ -0,0 +1,17 @@
 142 +#ifndef LITMUS_CACHE_PROC_H
 143 +#define LITMUS_CACHE_PROC_H
 144 +
 145 +#ifdef __KERNEL__
 146 +
 147 +void litmus_setup_lockdown(void __iomem*, u32);
 148 +void enter_irq_mode(void);
 149 +void exit_irq_mode(void);
 150 +void flush_cache(int all);
 151 +void lock_cache(int cpu, u32 val);
 152 +
 153 +extern struct page *new_alloc_page_color(unsigned long color);
 154 +
 155 +#endif
 156 +
 157 +#endif
 158 +
 159 diff --git include/litmus/litmus.h include/litmus/litmus.h
 160 index a6eb534..1037b48 100644
 161 --- include/litmus/litmus.h
 162 +++ include/litmus/litmus.h
 163 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 164  	((current)->state == TASK_RUNNING || 	\
 165  	 preempt_count() & PREEMPT_ACTIVE)
 166  
 167 +#define is_running(t) 			\
 168 +	((t)->state == TASK_RUNNING || 	\
 169 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 170 +
 171 +#define is_blocked(t)       \
 172 +	(!is_running(t))
 173 +
 174  #define is_released(t, now)	\
 175  	(lt_before_eq(get_release(t), now))
 176  #define is_tardy(t, now)    \
 177 @@ -130,6 +137,11 @@ void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 178  
 179  #define bheap2task(hn) ((struct task_struct*) hn->value)
 180  
 181 +static inline int is_mode_poll_task(struct task_struct *t)
 182 +{
 183 +	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->mode_poll_task : 0;
 184 +}
 185 +
 186  #ifdef CONFIG_NP_SECTION
 187  
 188  static inline int is_kernel_np(struct task_struct *t)
 189 diff --git include/litmus/mc2_common.h include/litmus/mc2_common.h
 190 new file mode 100644
 191 index 0000000..4b077ca
 192 --- /dev/null
 193 +++ include/litmus/mc2_common.h
 194 @@ -0,0 +1,35 @@
 195 +/*
 196 + * MC^2 common data structures
 197 + */
 198 + 
 199 +#ifndef __UNC_MC2_COMMON_H__
 200 +#define __UNC_MC2_COMMON_H__
 201 +
 202 +#define NR_MODES 32
 203 +
 204 +enum crit_level {
 205 +	CRIT_LEVEL_A = 0,
 206 +	CRIT_LEVEL_B = 1,
 207 +	CRIT_LEVEL_C = 2,
 208 +	NUM_CRIT_LEVELS = 3,
 209 +	MODE_POLL_TASK = 4,
 210 +};
 211 +
 212 +struct mc2_task {
 213 +	enum crit_level crit;
 214 +	unsigned int res_id;
 215 +	uint32_t mode_mask;
 216 +	int init_finished;
 217 +};
 218 +
 219 +#ifdef __KERNEL__
 220 +
 221 +#include <litmus/reservation.h>
 222 +
 223 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
 224 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 225 +							struct reservation *res);
 226 +	
 227 +#endif /* __KERNEL__ */
 228 +
 229 +#endif
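[Editorial sketch -- not part of the patch. How a task might fill in struct
mc2_task before calling the new set_mc2_task_param syscall; the invocation shown
is an assumed ABI, the fields come from mc2_common.h above.]

    struct mc2_task param = {
        .crit          = CRIT_LEVEL_B, /* criticality level B */
        .res_id        = 1,            /* attach to reservation id 1 */
        .mode_mask     = 0x3,          /* eligible in modes 0 and 1 */
        .init_finished = 0,
    };
    /* e.g.: syscall(__NR_set_mc2_task_param, getpid(), &param);  (assumed ABI) */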
 230 diff --git include/litmus/polling_reservations.h include/litmus/polling_reservations.h
 231 new file mode 100644
 232 index 0000000..66c9b1e
 233 --- /dev/null
 234 +++ include/litmus/polling_reservations.h
 235 @@ -0,0 +1,36 @@
 236 +#ifndef LITMUS_POLLING_RESERVATIONS_H
 237 +#define LITMUS_POLLING_RESERVATIONS_H
 238 +
 239 +#include <litmus/reservation.h>
 240 +
 241 +struct polling_reservation {
 242 +	/* extend basic reservation */
 243 +	struct reservation res;
 244 +
 245 +	lt_t max_budget;
 246 +	lt_t period;
 247 +	lt_t deadline;
 248 +	lt_t offset;
 249 +};
 250 +
 251 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
 252 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
 253 +
 254 +struct table_driven_reservation {
 255 +	/* extend basic reservation */
 256 +	struct reservation res;
 257 +
 258 +	lt_t major_cycle;
 259 +	unsigned int next_interval;
 260 +	unsigned int num_intervals;
 261 +	struct lt_interval *intervals;
 262 +
 263 +	/* info about current scheduling slot */
 264 +	struct lt_interval cur_interval;
 265 +	lt_t major_cycle_start;
 266 +};
 267 +
 268 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
 269 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
 270 +
 271 +#endif
 272 diff --git include/litmus/reservation.h include/litmus/reservation.h
 273 new file mode 100644
 274 index 0000000..ee3aac2
 275 --- /dev/null
 276 +++ include/litmus/reservation.h
 277 @@ -0,0 +1,263 @@
 278 +#ifndef LITMUS_RESERVATION_H
 279 +#define LITMUS_RESERVATION_H
 280 +
 281 +#include <linux/list.h>
 282 +#include <linux/hrtimer.h>
 283 +
 284 +struct reservation_client;
 285 +struct reservation_environment;
 286 +struct reservation;
 287 +
 288 +typedef enum {
 289 +	/* reservation has no clients, is not consuming budget */
 290 +	RESERVATION_INACTIVE = 0,
 291 +
 292 +	/* reservation has clients, consumes budget when scheduled */
 293 +	RESERVATION_ACTIVE,
 294 +
 295 +	/* reservation has no clients, but may be consuming budget */
 296 +	RESERVATION_ACTIVE_IDLE,
 297 +
 298 +	/* Reservation has no budget and waits for
 299 +	 * replenishment. May or may not have clients. */
 300 +	RESERVATION_DEPLETED,
 301 +} reservation_state_t;
 302 +
 303 +
 304 +/* ************************************************************************** */
 305 +
 306 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
 307 + * to schedule right now and background work can be scheduled. */
 308 +typedef struct task_struct * (*dispatch_t)  (
 309 +	struct reservation_client *client
 310 +);
 311 +
 312 +/* Something that can be managed in a reservation and that can yield
 313 + * a process for dispatching. Contains a pointer to the reservation
 314 + * to which it "belongs". */
 315 +struct reservation_client {
 316 +	struct list_head list;
 317 +	struct reservation* reservation;
 318 +	dispatch_t dispatch;
 319 +};
 320 +
 321 +
 322 +/* ************************************************************************** */
 323 +
 324 +/* Called by reservations to request state change. */
 325 +typedef void (*reservation_change_state_t)  (
 326 +	struct reservation_environment* env,
 327 +	struct reservation *res,
 328 +	reservation_state_t new_state
 329 +);
 330 +
 331 +/* The framework within which reservations operate. */
 332 +struct reservation_environment {
 333 +	lt_t time_zero;
 334 +	lt_t current_time;
 335 +
 336 +	/* services invoked by reservations */
 337 +	reservation_change_state_t change_state;
 338 +};
 339 +
 340 +
 341 +/* ************************************************************************** */
 342 +
 343 +/* A new client is added or an existing client resumes. */
 344 +typedef void (*client_arrives_t)  (
 345 +	struct reservation *reservation,
 346 +	struct reservation_client *client
 347 +);
 348 +
 349 +/* A client suspends or terminates. */
 350 +typedef void (*client_departs_t)  (
 351 +	struct reservation *reservation,
 352 +	struct reservation_client *client,
 353 +	int did_signal_job_completion
 354 +);
 355 +
 356 +/* A previously requested replenishment has occurred. */
 357 +typedef void (*on_replenishment_timer_t)  (
 358 +	struct reservation *reservation
 359 +);
 360 +
 361 +/* Update the reservation's budget to reflect execution or idling. */
 362 +typedef void (*drain_budget_t) (
 363 +	struct reservation *reservation,
 364 +	lt_t how_much
 365 +);
 366 +
 367 +/* Select a ready task from one of the clients for scheduling. */
 368 +typedef struct task_struct* (*dispatch_client_t)  (
 369 +	struct reservation *reservation,
 370 +	lt_t *time_slice /* May be used to force rescheduling after
 371 +	                    some amount of time. 0 => no limit */
 372 +);
 373 +
 374 +
 375 +struct reservation_ops {
 376 +	dispatch_client_t dispatch_client;
 377 +
 378 +	client_arrives_t client_arrives;
 379 +	client_departs_t client_departs;
 380 +
 381 +	on_replenishment_timer_t replenish;
 382 +	drain_budget_t drain_budget;
 383 +};
 384 +
 385 +struct reservation {
 386 +	/* used to queue in environment */
 387 +	struct list_head list;
 388 +
 389 +	reservation_state_t state;
 390 +	unsigned int id;
 391 +
 392 +	/* exact meaning defined by impl. */
 393 +	lt_t priority;
 394 +	lt_t cur_budget;
 395 +	lt_t next_replenishment;
 396 +
 397 +	/* budget stats */
 398 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
 399 +	lt_t budget_consumed_total;
 400 +
 401 +	/* interaction with framework */
 402 +	struct reservation_environment *env;
 403 +	struct reservation_ops *ops;
 404 +
 405 +	struct list_head clients;
 406 +	
 407 +	/* for global env. */
 408 +	int scheduled_on;
 409 +	int event_added;
 410 +	/* for blocked by ghost. Do not charge budget when ACTIVE */
 411 +	int blocked_by_ghost;
 412 +	/* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */
 413 +	int is_ghost;
 414 +
 415 +	/* for mode change */
 416 +	int reported;
 417 +	int mode;
 418 +	/* Under MC2, assume only one task per reservation,
 419 +	 * but possibly multiple reservations per task. */
 420 +	struct task_struct *tsk;
 421 +};
 422 +
 423 +void reservation_init(struct reservation *res);
 424 +
 425 +/* Default implementations */
 426 +
 427 +/* simply select the first client in the list, set *for_at_most to zero */
 428 +struct task_struct* default_dispatch_client(
 429 +	struct reservation *res,
 430 +	lt_t *for_at_most
 431 +);
 432 +
 433 +/* "connector" reservation client to hook up tasks with reservations */
 434 +struct task_client {
 435 +	struct reservation_client client;
 436 +	struct task_struct *task;
 437 +};
 438 +
 439 +void task_client_init(struct task_client *tc, struct task_struct *task,
 440 +	struct reservation *reservation);
 441 +
 442 +#define SUP_RESCHEDULE_NOW (0)
 443 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
 444 +
 445 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
 446 + * environment.
 447 + */
 448 +struct sup_reservation_environment {
 449 +	struct reservation_environment env;
 450 +
 451 +	/* ordered by priority */
 452 +	struct list_head active_reservations;
 453 +
 454 +	/* ordered by next_replenishment */
 455 +	struct list_head depleted_reservations;
 456 +
 457 +	/* unordered */
 458 +	struct list_head inactive_reservations;
 459 +
 460 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
 461 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
 462 +	 * any other value means program a timer for the given time
 463 +	 */
 464 +	lt_t next_scheduler_update;
 465 +	/* set to true if a call to sup_dispatch() is imminent */
 466 +	bool will_schedule;
 467 +};
 468 +
 469 +/* Contract:
 470 + *  - before calling into sup_ code, or any reservation methods,
 471 + *    update the time with sup_update_time(); and
 472 + *  - after calling into sup_ code, or any reservation methods,
 473 + *    check next_scheduler_update and program timer or trigger
 474 + *    scheduler invocation accordingly.
 475 + */
 476 +
 477 +void sup_init(struct sup_reservation_environment* sup_env);
 478 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
 479 +	struct reservation* new_res);
 480 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
 481 +	lt_t timeout);
 482 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
 483 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 484 +
 485 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 486 +	unsigned int id);
 487 +	
 488 +/* A global multiprocessor reservation environment. */
 489 +
 490 +typedef enum {
 491 +	EVENT_REPLENISH = 0,
 492 +	EVENT_DRAIN,
 493 +	EVENT_OTHERS,
 494 +} event_type_t;
 495 +
 496 +
 497 +struct next_timer_event {
 498 +	lt_t next_update;
 499 +	int timer_armed_on;
 500 +	unsigned int id;
 501 +	event_type_t type;
 502 +	struct list_head list;
 503 +};
 504 +
 505 +struct gmp_reservation_environment {
 506 +	//raw_spinlock_t lock;
 507 +	struct reservation_environment env;
 508 +
 509 +	/* ordered by priority */
 510 +	struct list_head active_reservations;
 511 +
 512 +	/* ordered by next_replenishment */
 513 +	struct list_head depleted_reservations;
 514 +
 515 +	/* unordered */
 516 +	struct list_head inactive_reservations;
 517 +
 518 +	/* timer event ordered by next_update */
 519 +	struct list_head next_events;
 520 +	
 521 +	/* (schedule_now == true) means call gmp_dispatch() now */
 522 +	int schedule_now;
 523 +	/* set to true if a call to gmp_dispatch() is imminent */
 524 +	bool will_schedule;
 525 +};
 526 +
 527 +void gmp_init(struct gmp_reservation_environment* gmp_env);
 528 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
 529 +	struct reservation* new_res);
 530 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
 531 +	lt_t timeout, unsigned int id, event_type_t type);
 532 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
 533 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
 534 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
 535 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
 536 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
 537 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
 538 +	unsigned int id);
 539 +
 540 +#endif
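[Editorial sketch -- not part of the patch. The usage contract documented above
for the SUP environment, written out as a hypothetical plugin scheduling path;
locking and plugin glue are omitted.]

    static struct task_struct *example_schedule(struct sup_reservation_environment *sup_env)
    {
        struct task_struct *next;

        /* 1. bring the environment up to date before calling sup_ code */
        sup_update_time(sup_env, litmus_clock());

        /* 2. let the environment pick the next task (NULL => background work) */
        next = sup_dispatch(sup_env);

        /* 3. afterwards, honor next_scheduler_update:
         *    SUP_RESCHEDULE_NOW      => invoke the scheduler again immediately,
         *    SUP_NO_SCHEDULER_UPDATE => nothing to do,
         *    any other value         => program a timer for that time. */
        if (sup_env->next_scheduler_update != SUP_NO_SCHEDULER_UPDATE)
            ; /* arm a timer for sup_env->next_scheduler_update here */

        return next;
    }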
 541 diff --git include/litmus/rt_param.h include/litmus/rt_param.h
 542 index 7b9a909..2ec2d0c 100644
 543 --- include/litmus/rt_param.h
 544 +++ include/litmus/rt_param.h
 545 @@ -51,6 +51,16 @@ typedef enum {
 546  	TASK_EARLY
 547  } release_policy_t;
 548  
 549 +#ifdef CONFIG_PGMRT_SUPPORT
 550 +typedef enum {
 551 +    PGM_NOT_A_NODE,
 552 +    PGM_SRC,
 553 +    PGM_SINK,
 554 +    PGM_SRC_SINK,
 555 +    PGM_INTERNAL
 556 +} pgm_node_type_t;
 557 +#endif
 558 +
 559  /* We use the common priority interpretation "lower index == higher priority",
 560   * which is commonly used in fixed-priority schedulability analysis papers.
 561   * So, a numerically lower priority value implies higher scheduling priority,
 562 @@ -62,6 +72,7 @@ typedef enum {
 563  #define LITMUS_MAX_PRIORITY     512
 564  #define LITMUS_HIGHEST_PRIORITY   1
 565  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
 566 +#define LITMUS_NO_PRIORITY		UINT_MAX
 567  
 568  /* Provide generic comparison macros for userspace,
 569   * in case that we change this later. */
 570 @@ -71,6 +82,49 @@ typedef enum {
 571  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
 572  	 (p) <= LITMUS_LOWEST_PRIORITY)
 573  
 574 +/* reservation support */
 575 +
 576 +typedef enum {
 577 +	PERIODIC_POLLING,
 578 +	SPORADIC_POLLING,
 579 +	TABLE_DRIVEN,
 580 +} reservation_type_t;
 581 +
 582 +struct lt_interval {
 583 +	lt_t start;
 584 +	lt_t end;
 585 +};
 586 +
 587 +#ifndef __KERNEL__
 588 +#define __user
 589 +#endif
 590 +
 591 +struct reservation_config {
 592 +	unsigned int id;
 593 +	lt_t priority;
 594 +	int  cpu;
 595 +
 596 +	union {
 597 +		struct {
 598 +			lt_t period;
 599 +			lt_t budget;
 600 +			lt_t relative_deadline;
 601 +			lt_t offset;
 602 +		} polling_params;
 603 +
 604 +		struct {
 605 +			lt_t major_cycle_length;
 606 +			unsigned int num_intervals;
 607 +			struct lt_interval __user *intervals;
 608 +		} table_driven_params;
 609 +	};
 610 +	
 611 +	/* indicates which MC2 mode this reservation belongs to */
 612 +	int mode;
 613 +};
 614 +
 615 +/* regular sporadic task support */
 616 +
 617  struct rt_task {
 618  	lt_t 		exec_cost;
 619  	lt_t 		period;
 620 @@ -81,6 +135,10 @@ struct rt_task {
 621  	task_class_t	cls;
 622  	budget_policy_t  budget_policy;  /* ignored by pfair */
 623  	release_policy_t release_policy;
 624 +#ifdef CONFIG_PGMRT_SUPPORT
 625 +	pgm_node_type_t	pgm_type;
 626 +	lt_t			pgm_expected_etoe;
 627 +#endif
 628  };
 629  
 630  union np_flag {
 631 @@ -120,6 +178,14 @@ struct control_page {
 632  	uint64_t ts_syscall_start;  /* Feather-Trace cycles */
 633  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
 634  				     * started. */
 635 +	volatile uint64_t mode_poll_task;		/* Set to 1 if the task invokes the enact_mode syscall */
 636 +
 637 +#ifdef CONFIG_PGMRT_SUPPORT
 638 +    /* Flags from userspace signifying PGM wait states. */
 639 +    volatile uint32_t   pgm_waiting;    /* waiting for tokens */
 640 +    volatile uint32_t   pgm_sending;    /* sending tokens */
 641 +    volatile uint32_t   pgm_satisfied;  /* done waiting/sending */
 642 +#endif
 643  
 644  	/* to be extended */
 645  };
 646 @@ -130,6 +196,7 @@ struct control_page {
 647  #define LITMUS_CP_OFFSET_IRQ_COUNT	8
 648  #define LITMUS_CP_OFFSET_TS_SC_START	16
 649  #define LITMUS_CP_OFFSET_IRQ_SC_START	24
 650 +#define LITMUS_CP_OFFSET_MODE_POLL_TASK	32
 651  
 652  /* don't export internal data structures to user space (liblitmus) */
 653  #ifdef __KERNEL__
 654 @@ -165,6 +232,7 @@ struct rt_job {
 655  };
 656  
 657  struct pfair_param;
 658 +struct mc2_task;
 659  
 660  /*	RT task parameters for scheduling extensions
 661   *	These parameters are inherited during clone and therefore must
 662 @@ -246,7 +314,10 @@ struct rt_param {
 663  	volatile int		linked_on;
 664  
 665  	/* PFAIR/PD^2 state. Allocated on demand. */
 666 -	struct pfair_param*	pfair;
 667 +	union {
 668 +		void *plugin_state;
 669 +		struct pfair_param *pfair;
 670 +	};
 671  
 672  	/* Fields saved before BE->RT transition.
 673  	 */
 674 @@ -275,6 +346,10 @@ struct rt_param {
 675  
 676  	/* Pointer to the page shared between userspace and kernel. */
 677  	struct control_page * ctrl_page;
 678 +
 679 +	/* Mixed-criticality specific data */
 680 +	struct mc2_task* mc2_data;
 681 +	unsigned long addr_ctrl_page;
 682  };
 683  
 684  #endif
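[Editorial sketch -- not part of the patch. A reservation_config for a periodic
polling reservation, as it could be handed to the reservation_create backend;
the values are arbitrary, times in nanoseconds.]

    struct reservation_config cfg = {
        .id       = 1,
        .priority = LITMUS_NO_PRIORITY, /* e.g., let EDF-style plugins order by deadline */
        .cpu      = 0,
        .polling_params = {
            .period            = 100000000ULL, /* 100 ms */
            .budget            =  20000000ULL, /*  20 ms */
            .relative_deadline = 100000000ULL, /* here: equal to the period */
            .offset            = 0,
        },
        .mode     = 0, /* which MC2 mode this reservation belongs to */
    };
    /* e.g.: syscall(__NR_reservation_create, PERIODIC_POLLING, &cfg);  (assumed ABI) */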
 685 diff --git include/litmus/sched_plugin.h include/litmus/sched_plugin.h
 686 index 0ccccd6..4c8aaa6 100644
 687 --- include/litmus/sched_plugin.h
 688 +++ include/litmus/sched_plugin.h
 689 @@ -77,6 +77,17 @@ typedef long (*wait_for_release_at_t)(lt_t release_time);
 690  /* Informs the plugin when a synchronous release takes place. */
 691  typedef void (*synchronous_release_at_t)(lt_t time_zero);
 692  
 693 +/* How much budget has the current task consumed so far, and how much
 694 + * has it left? The default implementation ties into the per-task
 695 + * budget enforcement code. Plugins can override this to report
 696 + * reservation-specific values. */
 697 +typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
 698 +
 699 +/* Reservation creation/removal backends. The meaning of reservation_type
 700 + * and reservation_id is entirely plugin-specific. */
 701 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
 702 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
 703 +
 704  /************************ misc routines ***********************/
 705  
 706  
 707 @@ -109,6 +120,12 @@ struct sched_plugin {
 708  	task_exit_t 		task_exit;
 709  	task_cleanup_t		task_cleanup;
 710  
 711 +	current_budget_t	current_budget;
 712 +
 713 +	/* Reservation support */
 714 +	reservation_create_t	reservation_create;
 715 +	reservation_destroy_t	reservation_destroy;
 716 +
 717  #ifdef CONFIG_LITMUS_LOCKING
 718  	/*	locking protocols	*/
 719  	allocate_lock_t		allocate_lock;
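[Editorial sketch -- not part of the patch. How a plugin might implement and
advertise the new hooks; all names here are hypothetical.]

    static void example_current_budget(lt_t *used_so_far, lt_t *remaining)
    {
        if (used_so_far)
            *used_so_far = 0; /* report plugin-specific accounting here */
        if (remaining)
            *remaining = 0;
    }

    static long example_reservation_create(int reservation_type, void __user *config)
    {
        return -ENOSYS; /* interpret reservation_type/config as the plugin sees fit */
    }

    static long example_reservation_destroy(unsigned int reservation_id, int cpu)
    {
        return -ENOSYS;
    }

    static struct sched_plugin example_plugin = {
        .plugin_name         = "EXAMPLE",
        .current_budget      = example_current_budget,
        .reservation_create  = example_reservation_create,
        .reservation_destroy = example_reservation_destroy,
        /* ... remaining callbacks ... */
    };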
 720 diff --git include/litmus/sched_trace.h include/litmus/sched_trace.h
 721 index 82bde82..80d9523 100644
 722 --- include/litmus/sched_trace.h
 723 +++ include/litmus/sched_trace.h
 724 @@ -50,13 +50,12 @@ struct st_switch_away_data {	/* A process was switched away from on a given CPU.
 725  	u64	exec_time;
 726  };
 727  
 728 -struct st_completion_data {	/* A job completed. */
 729 -	u64	when;
 730 -	u8	forced:1; 	/* Set to 1 if job overran and kernel advanced to the
 731 -				 * next task automatically; set to 0 otherwise.
 732 -				 */
 733 -	u8	__uflags:7;
 734 -	u8	__unused[7];
 735 +struct st_completion_data {	/* A job completed. */
 736 +	u64	when;
 737 +	u8	forced:1;	/* Set to 1 if job overran and kernel advanced to the
 738 +				 * next job automatically; set to 0 otherwise.
 739 +				 */
 740 +	u64	exec_time:63;	/* Actual execution time of job. */
 741  };
 742  
 743  struct st_block_data {		/* A task blocks. */
 744 @@ -80,6 +79,21 @@ struct st_sys_release_data {
 745  	u64	release;
 746  };
 747  
 748 +struct st_enact_mode_data {
 749 +	u64	when;
 750 +	u8	__unused[8];
 751 +};
 752 +
 753 +struct st_request_mode_data {
 754 +	u64	when;
 755 +	u8	__unused[8];
 756 +};
 757 +
 758 +struct st_sys_start_data {
 759 +	u64	when;
 760 +	u64	start;
 761 +};
 762 +
 763  #define DATA(x) struct st_ ## x ## _data x;
 764  
 765  typedef enum {
 766 @@ -87,14 +101,16 @@ typedef enum {
 767  				 * uninitialized records. */
 768  	ST_PARAM,
 769  	ST_RELEASE,
 770 -	ST_ASSIGNED,
 771  	ST_SWITCH_TO,
 772  	ST_SWITCH_AWAY,
 773  	ST_COMPLETION,
 774  	ST_BLOCK,
 775  	ST_RESUME,
 776  	ST_ACTION,
 777 -	ST_SYS_RELEASE
 778 +	ST_SYS_RELEASE,
 779 +	ST_ENACT_MODE,
 780 +	ST_REQUEST_MODE,
 781 +	ST_SYS_START,
 782  } st_event_record_type_t;
 783  
 784  struct st_event_record {
 785 @@ -105,7 +121,6 @@ struct st_event_record {
 786  		DATA(name);
 787  		DATA(param);
 788  		DATA(release);
 789 -		DATA(assigned);
 790  		DATA(switch_to);
 791  		DATA(switch_away);
 792  		DATA(completion);
 793 @@ -113,6 +128,9 @@ struct st_event_record {
 794  		DATA(resume);
 795  		DATA(action);
 796  		DATA(sys_release);
 797 +		DATA(enact_mode);
 798 +		DATA(request_mode);
 799 +		DATA(sys_start);
 800  	} data;
 801  };
 802  
 803 @@ -155,6 +173,14 @@ feather_callback void do_sched_trace_action(unsigned long id,
 804  feather_callback void do_sched_trace_sys_release(unsigned long id,
 805  						 lt_t* start);
 806  
 807 +feather_callback void do_sched_trace_enact_mode(unsigned long id,
 808 +						struct task_struct* task);
 809 +
 810 +feather_callback void do_sched_trace_request_mode(unsigned long id,
 811 +						struct task_struct* task);
 812 +feather_callback void do_sched_trace_sys_start(unsigned long id,
 813 +						 lt_t* start);
 814 +
 815  #endif
 816  
 817  #else
 818 @@ -179,6 +205,9 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 819  #define trace_litmus_task_block(t)
 820  #define trace_litmus_task_resume(t)
 821  #define trace_litmus_sys_release(start)
 822 +#define trace_litmus_enact_mode(t)
 823 +#define trace_litmus_request_mode(t)
 824 +#define trace_litmus_sys_start(start)
 825  
 826  #endif
 827  
 828 @@ -254,6 +283,28 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 829  
 830  #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 831  
 832 +#define sched_trace_enact_mode(t)					\
 833 +	do {								\
 834 +		SCHED_TRACE(SCHED_TRACE_BASE_ID + 11,			\
 835 +			do_sched_trace_enact_mode, t);			\
 836 +		trace_litmus_enact_mode(t);				\
 837 +	} while (0)
 838 +
 839 +#define sched_trace_request_mode(t)					\
 840 +	do {								\
 841 +		SCHED_TRACE(SCHED_TRACE_BASE_ID + 12,			\
 842 +			do_sched_trace_request_mode, t);		\
 843 +		trace_litmus_request_mode(t);				\
 844 +	} while (0)
 845 +
 846 +/* when is a pointer, it does not need an explicit cast to unsigned long */
 847 +#define sched_trace_sys_start(when)					\
 848 +	do {								\
 849 +		SCHED_TRACE(SCHED_TRACE_BASE_ID + 13,			\
 850 +			do_sched_trace_sys_start, when);		\
 851 +		trace_litmus_sys_start(when);				\
 852 +	} while (0)
 853 +
 854  #endif /* __KERNEL__ */
 855  
 856  #endif
 857 diff --git include/litmus/trace.h include/litmus/trace.h
 858 index 6017872..4dbb39ea 100644
 859 --- include/litmus/trace.h
 860 +++ include/litmus/trace.h
 861 @@ -3,7 +3,6 @@
 862  
 863  #ifdef CONFIG_SCHED_OVERHEAD_TRACE
 864  
 865 -
 866  #include <litmus/feather_trace.h>
 867  #include <litmus/feather_buffer.h>
 868  
 869 @@ -118,6 +117,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 870  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
 871  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
 872  
 873 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
 874 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
 875 +
 876  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
 877  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
 878  
 879 @@ -137,6 +139,20 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 880  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
 881  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
 882  
 883 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
 884 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
 885 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
 886 +
 887 +#define TS_MODE_CHANGE_START	CPU_TIMESTAMP(194)
 888 +#define TS_MODE_CHANGE_END		CPU_TIMESTAMP(195)
 889 +
 890 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
 891 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
 892 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
 893 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
 894 +
 895 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
 896 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
 897 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
 898 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
 899  
 900  #endif /* !_SYS_TRACE_H_ */
 901 diff --git include/litmus/unistd_32.h include/litmus/unistd_32.h
 902 index 94264c2..86bbbb8d 100644
 903 --- include/litmus/unistd_32.h
 904 +++ include/litmus/unistd_32.h
 905 @@ -17,5 +17,13 @@
 906  #define __NR_wait_for_ts_release __LSC(9)
 907  #define __NR_release_ts		__LSC(10)
 908  #define __NR_null_call		__LSC(11)
 909 +#define __NR_get_current_budget __LSC(12)
 910 +#define __NR_reservation_create	__LSC(13)
 911 +#define __NR_reservation_destroy	__LSC(14)
 912 +#define __NR_set_mc2_task_param	__LSC(15)
 913 +#define __NR_set_page_color		__LSC(16)
 914 +#define __NR_test_call		__LSC(17)
 915 +#define __NR_run_test		__LSC(18)
 916 +#define __NR_lock_buffer	__LSC(19)
 917  
 918 -#define NR_litmus_syscalls 12
 919 +#define NR_litmus_syscalls	20
 920 diff --git include/litmus/unistd_64.h include/litmus/unistd_64.h
 921 index d5ced0d..4b96e7c 100644
 922 --- include/litmus/unistd_64.h
 923 +++ include/litmus/unistd_64.h
 924 @@ -29,5 +29,22 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 925  __SYSCALL(__NR_release_ts, sys_release_ts)
 926  #define __NR_null_call				__LSC(11)
 927  __SYSCALL(__NR_null_call, sys_null_call)
 928 +#define __NR_get_current_budget			__LSC(12)
 929 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
 930 +#define __NR_reservation_create		__LSC(13)
 931 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
 932 +#define __NR_reservation_destroy	__LSC(14)
 933 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
 934 +#define __NR_set_mc2_task_param		__LSC(15)
 935 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
 936 +#define __NR_set_page_color			__LSC(16)
 937 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
 938 +#define __NR_test_call				__LSC(17)
 939 +__SYSCALL(__NR_test_call, sys_test_call)
 940 +#define __NR_run_test				__LSC(18)
 941 +__SYSCALL(__NR_run_test, sys_run_test)
 942 +#define __NR_lock_buffer			__LSC(19)
 943 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
 944  
 945 -#define NR_litmus_syscalls 12
 946 +
 947 +#define NR_litmus_syscalls 20
 948 diff --git include/trace/events/litmus.h include/trace/events/litmus.h
 949 index 0fffcee..ef8d60f2 100644
 950 --- include/trace/events/litmus.h
 951 +++ include/trace/events/litmus.h
 952 @@ -225,6 +225,76 @@ TRACE_EVENT(litmus_sys_release,
 953  	TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when)
 954  );
 955  
 956 +/*
 957 + * Trace enact mode
 958 + */
 959 +TRACE_EVENT(litmus_enact_mode,
 960 +
 961 +	TP_PROTO(struct task_struct *t),
 962 +
 963 +	TP_ARGS(t),
 964 +
 965 +	TP_STRUCT__entry(
 966 +		__field( pid_t,		pid	)
 967 +		__field( unsigned int,	job	)
 968 +		__field( lt_t,		when	)
 969 +	),
 970 +
 971 +	TP_fast_assign(
 972 +		__entry->pid	= t ? t->pid : 0;
 973 +		__entry->job	= t ? t->rt_param.job_params.job_no : 0;
 974 +		__entry->when	= litmus_clock();
 975 +	),
 976 +
 977 +	TP_printk("Mode Enact at %Lu\n", __entry->when)
 978 +);
 979 +
 980 +/*
 981 + * Trace notice mode request
 982 + */
 983 +TRACE_EVENT(litmus_request_mode,
 984 +
 985 +	TP_PROTO(struct task_struct *t),
 986 +
 987 +	TP_ARGS(t),
 988 +
 989 +	TP_STRUCT__entry(
 990 +		__field( pid_t,		pid	)
 991 +		__field( unsigned int,	job	)
 992 +		__field( lt_t,		when	)
 993 +	),
 994 +
 995 +	TP_fast_assign(
 996 +		__entry->pid	= t ? t->pid : 0;
 997 +		__entry->job	= t ? t->rt_param.job_params.job_no : 0;
 998 +		__entry->when	= litmus_clock();
 999 +	),
1000 +
1001 +	TP_printk("Mode request at %Lu\n", __entry->when)
1002 +);	
1003 +
1004 +/*
1005 + * Trace synchronous start
1006 + */
1007 +TRACE_EVENT(litmus_sys_start,
1008 +
1009 +	TP_PROTO(lt_t *start),
1010 +
1011 +	TP_ARGS(start),
1012 +
1013 +	TP_STRUCT__entry(
1014 +		__field( lt_t,		rel	)
1015 +		__field( lt_t,		when	)
1016 +	),
1017 +
1018 +	TP_fast_assign(
1019 +		__entry->rel	= *start;
1020 +		__entry->when	= litmus_clock();
1021 +	),
1022 +
1023 +	TP_printk("SynStart(%Lu) at %Lu\n", __entry->rel, __entry->when)
1024 +);
1025 +
1026  #endif /* _SCHED_TASK_TRACEPOINT_H */
1027  
1028  /* Must stay outside the protection */
1029 diff --git kernel/sched/litmus.c kernel/sched/litmus.c
1030 index 9d58690..60be718 100644
1031 --- kernel/sched/litmus.c
1032 +++ kernel/sched/litmus.c
1033 @@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
1034  	/* task counter */
1035  	p->se.sum_exec_runtime += delta;
1036  	if (delta) {
1037 -		TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1038 -			delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1039 +		//TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1040 +		//	delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1041 +		;
1042  	}
1043  	/* sched_clock() */
1044  	p->se.exec_start = rq->clock;
1045 diff --git litmus/Kconfig litmus/Kconfig
1046 index babb43d..d47548d 100644
1047 --- litmus/Kconfig
1048 +++ litmus/Kconfig
1049 @@ -243,7 +243,7 @@ config SCHED_TASK_TRACE
1050  config SCHED_TASK_TRACE_SHIFT
1051         int "Buffer size for sched_trace_xxx() events"
1052         depends on SCHED_TASK_TRACE
1053 -       range 8 13
1054 +       range 8 22
1055         default 9
1056         help
1057  
1058 diff --git litmus/Makefile litmus/Makefile
1059 index 7970cd5..e274409 100644
1060 --- litmus/Makefile
1061 +++ litmus/Makefile
1062 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
1063  	    sync.o \
1064  	    rt_domain.o \
1065  	    edf_common.o \
1066 +	    mc2_common.o \
1067  	    fp_common.o \
1068  	    fdso.o \
1069  	    locking.o \
1070 @@ -19,13 +20,18 @@ obj-y     = sched_plugin.o litmus.o \
1071  	    binheap.o \
1072  	    ctrldev.o \
1073  	    uncachedev.o \
1074 +	    reservation.o \
1075 +	    polling_reservations.o \
1076  	    sched_gsn_edf.o \
1077  	    sched_psn_edf.o \
1078 -	    sched_pfp.o
1079 +	    sched_pfp.o \
1080 +	    sched_mc2.o \
1081 +	    bank_proc.o \
1082 +	    color_shm.o \
1083 +	    cache_proc.o
1084  
1085  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
1086  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
1087 -
1088  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
1089  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
1090  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
1091 diff --git litmus/bank_proc.c litmus/bank_proc.c
1092 new file mode 100644
1093 index 0000000..097cff1
1094 --- /dev/null
1095 +++ litmus/bank_proc.c
1096 @@ -0,0 +1,793 @@
1097 +/*
1098 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
1099 + *               The file keeps a pool of colored pages. Users can request pages
1100 + *               with a specific color or bank number.
1101 + *               Part of the code is adapted from Jonathan Herman's code.
1102 + */
1103 +#include <linux/init.h>
1104 +#include <linux/types.h>
1105 +#include <linux/kernel.h>
1106 +#include <linux/module.h>
1107 +#include <linux/sysctl.h>
1108 +#include <linux/slab.h>
1109 +#include <linux/io.h>
1110 +#include <linux/mutex.h>
1111 +#include <linux/mm.h>
1112 +#include <linux/random.h>
1113 +
1114 +#include <litmus/litmus_proc.h>
1115 +#include <litmus/sched_trace.h>
1116 +#include <litmus/litmus.h>
1117 +
1118 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
1119 +
1120 +// This address decoding is used on the imx6-sabredsd platform
1121 +#define BANK_MASK  0x38000000     
1122 +#define BANK_SHIFT  27
1123 +#define CACHE_MASK  0x0000f000      
1124 +#define CACHE_SHIFT 12
1125 +
1126 +#define PAGES_PER_COLOR 2000
1127 +#define NUM_BANKS	8
1128 +#define NUM_COLORS	16
1129 +
1130 +unsigned int NUM_PAGE_LIST;  //8*16
1131 +
1132 +unsigned int number_banks;
1133 +unsigned int number_cachecolors;
1134 +
1135 +unsigned int set_partition_max = 0x0000ffff;
1136 +unsigned int set_partition_min = 0;
1137 +unsigned int bank_partition_max = 0x000000ff;
1138 +unsigned int bank_partition_min = 0;
1139 +
1140 +int show_page_pool = 0;
1141 +int refill_page_pool = 0;
1142 +spinlock_t reclaim_lock;
1143 +
1144 +unsigned int set_partition[9] = {
1145 +        0x00000003,  /* Core 0, and Level A*/
1146 +        0x00000003,  /* Core 0, and Level B*/
1147 +        0x0000000C,  /* Core 1, and Level A*/
1148 +        0x0000000C,  /* Core 1, and Level B*/
1149 +        0x00000030,  /* Core 2, and Level A*/
1150 +        0x00000030,  /* Core 2, and Level B*/
1151 +        0x000000C0,  /* Core 3, and Level A*/
1152 +        0x000000C0,  /* Core 3, and Level B*/
1153 +        0x0000ff00,  /* Level C */
1154 +};
1155 +
1156 +unsigned int bank_partition[9] = {
1157 +        0x00000010,  /* Core 0, and Level A*/
1158 +        0x00000010,  /* Core 0, and Level B*/
1159 +        0x00000020,  /* Core 1, and Level A*/
1160 +        0x00000020,  /* Core 1, and Level B*/
1161 +        0x00000040,  /* Core 2, and Level A*/
1162 +        0x00000040,  /* Core 2, and Level B*/
1163 +        0x00000080,  /* Core 3, and Level A*/
1164 +        0x00000080,  /* Core 3, and Level B*/
1165 +        0x0000000c,  /* Level C */
1166 +};
1167 +
1168 +unsigned int set_index[9] = {
1169 +    0, 0, 0, 0, 0, 0, 0, 0, 0
1170 +};
1171 +
1172 +unsigned int bank_index[9] = {
1173 +    0, 0, 0, 0, 0, 0, 0, 0, 0
1174 +};
1175 +
1176 +int node_index[9] = {
1177 +    -1, -1, -1, -1, -1, -1, -1, -1, -1
1178 +};
1179 +
1180 +struct mutex void_lockdown_proc;
1181 +
1182 +/*
1183 + * Every page list contains a lock, a list head, and a count of how many pages it stores
1184 + */ 
1185 +struct color_group {
1186 +	spinlock_t lock;
1187 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
1188 +	struct list_head list;
1189 +	atomic_t nr_pages;
1190 +};
1191 +
1192 +
1193 +static struct color_group *color_groups;
1194 +
1195 +/*
1196 + * Naive function to count the number of 1's
1197 + */
1198 +unsigned int counting_one_set(unsigned int v)
1199 +{
1200 +    unsigned int c; // c accumulates the total bits set in v
1201 +
1202 +    for (c = 0; v; v >>= 1)
1203 +    {
1204 +        c += v & 1;
1205 +    }
1206 +    return c;
1207 +}
1208 +
1209 +unsigned int two_exp(unsigned int e)
1210 +{
1211 +    unsigned int v = 1;
1212 +    for (; e>0; e-- )
1213 +    {
1214 +        v=v*2;
1215 +    }
1216 +    return v;
1217 +}
1218 +
1219 +unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
1220 +{
1221 +    unsigned int pos = 0;
1222 +
1223 +    while(true)
1224 +    {
1225 +        if(index ==0 && (bitmask & 1)==1)
1226 +        {
1227 +            break;
1228 +        }
1229 +        if(index !=0 && (bitmask & 1)==1){
1230 +            index--;
1231 +        }
1232 +        pos++;
1233 +        bitmask = bitmask >>1;
1234 +
1235 +    }
1236 +    return pos;
1237 +}
1238 +
1239 +/* helper functions to find the next colored pool index */
1240 +static inline unsigned int first_index(unsigned long node)
1241 +{
1242 +	unsigned int bank_no = 0, color_no = 0;
1243 +	
1244 +	while(bank_no < NUM_BANKS) {
1245 +		if ((bank_partition[node]>>bank_no) & 0x1)
1246 +			break;
1247 +		bank_no++;
1248 +	}
1249 +	while(color_no < NUM_COLORS) {
1250 +		if ((set_partition[node]>>color_no) & 0x1)
1251 +			break;
1252 +		color_no++;
1253 +	}
1254 +	return NUM_COLORS*bank_no + color_no; 
1255 +}
1256 +
1257 +static inline unsigned int last_index(unsigned long node)
1258 +{
1259 +	int bank_no = 7, color_no = 15;	/* signed: the loops below test >= 0 */
1260 +	
1261 +	while(bank_no >= 0) {
1262 +		if ((bank_partition[node]>>bank_no) & 0x1)
1263 +			break;
1264 +		bank_no--;
1265 +	}
1266 +	while(color_no >= 0) {
1267 +		if ((set_partition[node]>>color_no) & 0x1)
1268 +			break;
1269 +		color_no--;
1270 +	}
1271 +	return NUM_COLORS*bank_no + color_no; 
1272 +}
1273 +
1274 +static inline unsigned int next_color(unsigned long node, unsigned int current_color)
1275 +{
1276 +	int try = 0, ret = 0;
1277 +	current_color++;
1278 +	if (current_color == NUM_COLORS) {
1279 +		current_color = 0;
1280 +		ret = 1;
1281 +	}
1282 +	
1283 +	while (try < NUM_COLORS) {
1284 +		if ((set_partition[node]>>current_color)&0x1)
1285 +			break;
1286 +		current_color++;
1287 +		if (current_color == NUM_COLORS) {
1288 +			current_color = 0;
1289 +			ret = 1;
1290 +		}
1291 +		try++;
1292 +	}
1293 +	if (!ret)
1294 +		return current_color;
1295 +	else
1296 +		return current_color + NUM_COLORS;
1297 +}
1298 +
1299 +static inline unsigned int next_bank(unsigned long node, unsigned int current_bank)
1300 +{
1301 +	int try = 0;
1302 +	current_bank++;
1303 +	if (current_bank == NUM_BANKS) {
1304 +		current_bank = 0;
1305 +	}
1306 +	
1307 +	while (try < NUM_BANKS) {
1308 +		if ((bank_partition[node]>>current_bank)&0x1)
1309 +			break;
1310 +		current_bank++;
1311 +		if (current_bank == NUM_BANKS) {
1312 +			current_bank = 0;
1313 +		}
1314 +		try++;
1315 +	}
1316 +	return current_bank;
1317 +}
1318 +
1319 +static inline unsigned int get_next_index(unsigned long node, unsigned int current_index)
1320 +{
1321 +	unsigned int bank_no, color_no, color_ret, bank_ret;
1322 +	bank_no = current_index>>4; // 2^4 = 16 colors
1323 +	color_no = current_index - bank_no*NUM_COLORS;
1324 +	bank_ret = bank_no;
1325 +	color_ret = next_color(node, color_no);
1326 +	if (color_ret >= NUM_COLORS) {
1327 +		// next bank
1328 +		color_ret -= NUM_COLORS;
1329 +		bank_ret = next_bank(node, bank_no);
1330 +	}
1331 +
1332 +	return bank_ret * NUM_COLORS + color_ret;
1333 +}
1334 +
1335 +/* Decoding page color, 0~15 */ 
1336 +static inline unsigned int page_color(struct page *page)
1337 +{
1338 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
1339 +}
1340 +
1341 +/* Decoding page bank number, 0~7 */ 
1342 +static inline unsigned int page_bank(struct page *page)
1343 +{
1344 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
1345 +}
1346 +
1347 +static inline unsigned int page_list_index(struct page *page)
1348 +{
1349 +    unsigned int idx;  
1350 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
1351 +
1352 +    return idx; 
1353 +}
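+/* (Editorial note, not part of the original patch.) Worked example of the
+ * decoding above: with BANK_MASK 0x38000000 (shift 27) and CACHE_MASK
+ * 0x0000f000 (shift 12), a page at physical address 0x1A345000 has
+ *   bank  = (0x1A345000 & 0x38000000) >> 27 = 3,
+ *   color = (0x1A345000 & 0x0000f000) >> 12 = 5,
+ * so page_list_index() returns 5 + 3 * 16 = 53.
+ */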
1354 +
1355 +
1356 +
1357 +/*
1358 + * It is used to determine the smallest number of page lists. 
1359 + */
1360 +static unsigned long smallest_nr_pages(void)
1361 +{
1362 +	unsigned long i, min_pages;
1363 +	struct color_group *cgroup;
1364 +	cgroup = &color_groups[16*2];
1365 +	min_pages =atomic_read(&cgroup->nr_pages); 
1366 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
1367 +		cgroup = &color_groups[i];
1368 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
1369 +			min_pages = atomic_read(&cgroup->nr_pages);
1370 +	}
1371 +	return min_pages;
1372 +}
1373 +
1374 +static void show_nr_pages(void)
1375 +{
1376 +	unsigned long i;
1377 +	struct color_group *cgroup;
1378 +	printk("show nr pages***************************************\n");
1379 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
1380 +		cgroup = &color_groups[i];
1381 +		printk("(%03ld) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
1382 +		if((i % 8) ==7) {
1383 +		    printk("\n");
1384 +		}
1385 +	}
1386 +}
1387 +
1388 +/*
1389 + * Add a page to current pool.
1390 + */
1391 +void add_page_to_color_list(struct page *page)
1392 +{
1393 +	const unsigned long color = page_list_index(page);
1394 +	struct color_group *cgroup = &color_groups[color];
1395 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
1396 +	BUG_ON(page_count(page) > 1);
1397 +	spin_lock(&cgroup->lock);
1398 +	list_add_tail(&page->lru, &cgroup->list);
1399 +	atomic_inc(&cgroup->nr_pages);
1400 +	SetPageLRU(page);
1401 +	spin_unlock(&cgroup->lock);
1402 +}
1403 +
1404 +/*
1405 + * Replenish the page pool. 
1406 + * If the newly allocate page is what we want, it will be pushed to the correct page list
1407 + * otherwise, it will be freed. 
1408 + * A user needs to invoke this function until the page pool has enough pages.
1409 + */
1410 +static int do_add_pages(void)
1411 +{
1412 +	struct page *page, *page_tmp;
1413 +	LIST_HEAD(free_later);
1414 +	unsigned long color;
1415 +	int ret = 0;
1416 +	int i = 0;
1417 +	int free_counter = 0;
1418 +	unsigned long counter[128]= {0}; 
1419 +        
1420 +	// attempt a bounded number of allocations so that the page lists fill up
1421 +	for (i=0; i< 1024*20;i++) {
1422 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
1423 +	
1424 +		if (unlikely(!page)) {
1425 +			printk(KERN_WARNING "Could not allocate pages.\n");
1426 +			ret = -ENOMEM;
1427 +			goto out;
1428 +		}
1429 +		color = page_list_index(page);
1430 +		counter[color]++;
1431 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { /* color is unsigned */
1432 +			add_page_to_color_list(page);
1433 +		} else {
1434 +			// Pages here will be freed later 
1435 +			list_add_tail(&page->lru, &free_later);
1436 +			free_counter++;
1437 +		}
1438 +	}
1439 +
1440 +	// Free the unwanted pages
1441 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1442 +		list_del(&page->lru);
1443 +		__free_page(page);
1444 +	}
1445 +out:
1446 +        return ret;
1447 +}
1448 +
1449 +/*
1450 + * Provide pages for replacement according to cache color.
1451 + * This should be the only allocation implementation here;
1452 + * this function should not be called by others directly.
1453 + * 
1454 + */ 
1455 +static struct page *new_alloc_page_color( unsigned long color)
1456 +{
1457 +//	printk("allocate new page color = %d\n", color);	
1458 +	struct color_group *cgroup;
1459 +	struct page *rPage = NULL;
1460 +		
1461 +	if (color > (number_cachecolors * number_banks - 1)) {	/* color is unsigned, cannot be < 0 */
1462 +		TRACE_CUR("Wrong color %lu\n", color);	
1463 +		goto out;
1464 +	}
1465 +
1466 +		
1467 +	cgroup = &color_groups[color];
1468 +	spin_lock(&cgroup->lock);
1469 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
1470 +		TRACE_CUR("No free %lu colored pages.\n", color);
1471 +		goto out_unlock;
1472 +	}
1473 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
1474 +	BUG_ON(page_count(rPage) > 1);
1475 +	//get_page(rPage);
1476 +	list_del(&rPage->lru);
1477 +	atomic_dec(&cgroup->nr_pages);
1478 +	ClearPageLRU(rPage);
1479 +out_unlock:
1480 +	spin_unlock(&cgroup->lock);
1481 +out:
1482 +	return rPage;
1483 +}
1484 +
1485 +struct page* get_colored_page(unsigned long color)
1486 +{
1487 +	return new_alloc_page_color(color);
1488 +}
1489 +
1490 +/*
1491 + * provide pages for replacement according to  
1492 + * node = 0 for Level A tasks in Cpu 0
1493 + * node = 1 for Level B tasks in Cpu 0
1494 + * node = 2 for Level A tasks in Cpu 1
1495 + * node = 3 for Level B tasks in Cpu 1
1496 + * node = 4 for Level A tasks in Cpu 2
1497 + * node = 5 for Level B tasks in Cpu 2
1498 + * node = 6 for Level A tasks in Cpu 3
1499 + * node = 7 for Level B tasks in Cpu 3
1500 + * node = 8 for Level C tasks 
1501 + */
1502 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
1503 +{
1504 +	struct page *rPage = NULL;
1505 +	int try = 0;
1506 +	unsigned int idx;
1507 +	
1508 +	if (node_index[node] == -1)
1509 +		idx = first_index(node);
1510 +	else
1511 +		idx = node_index[node];
1512 +	
1513 +	BUG_ON(idx<0 || idx>127);
1514 +	rPage =  new_alloc_page_color(idx);
1515 +	if (node_index[node] == last_index(node))
1516 +		node_index[node] = first_index(node);
1517 +	else
1518 +		node_index[node]++;
1519 +
1520 +	while (!rPage)  {
1521 +		try++;
1522 +		if (try>=256)
1523 +			break;
1524 +		idx = get_next_index(node, idx);
1525 +		printk(KERN_ALERT "try = %d, out of pages! requesting node = %lu, idx = %u\n", try, node, idx);
1526 +		BUG_ON(idx<0 || idx>127);
1527 +		rPage = new_alloc_page_color(idx);
1528 +	}
1529 +	node_index[node] = idx;
1530 +	return rPage; 
1531 +}
1532 +
1533 +
1534 +/*
1535 + * Reclaim pages.
1536 + */
1537 +void reclaim_page(struct page *page)
1538 +{
1539 +	const unsigned long color = page_list_index(page);
1540 +	spin_lock(&reclaim_lock);
1541 +    	put_page(page);
1542 +	add_page_to_color_list(page);
1543 +
1544 +	spin_unlock(&reclaim_lock);
1545 +	printk("Reclaimed page(%ld) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1546 +}
1547 +
1548 +
1549 +/*
1550 + * Initialize the numbers of banks and cache colors 
1551 + */ 
1552 +static void __init init_variables(void)
1553 +{
1554 +	number_banks = counting_one_set(BANK_MASK); 
1555 +	number_banks = two_exp(number_banks); 
1556 +
1557 +	number_cachecolors = counting_one_set(CACHE_MASK);
1558 +	number_cachecolors = two_exp(number_cachecolors);
1559 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
1560 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
1561 +	mutex_init(&void_lockdown_proc);
1562 +	spin_lock_init(&reclaim_lock);
1563 +
1564 +}
1565 +
1566 +
1567 +/*
1568 + * Initialize the page pool 
1569 + */
1570 +static int __init init_color_groups(void)
1571 +{
1572 +	struct color_group *cgroup;
1573 +	unsigned long i;
1574 +	int err = 0;
1575 +
1576 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
1577 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
1578 +
1579 +	if (!color_groups) {
1580 +		printk(KERN_WARNING "Could not allocate color groups.\n");
1581 +		err = -ENOMEM;
1582 +	}else{
1583 +
1584 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
1585 +			cgroup = &color_groups[i];
1586 +			atomic_set(&cgroup->nr_pages, 0);
1587 +			INIT_LIST_HEAD(&cgroup->list);
1588 +			spin_lock_init(&cgroup->lock);
1589 +		}
1590 +	}
1591 +        return err;
1592 +}
1593 +
1594 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1595 +		size_t *lenp, loff_t *ppos)
1596 +{
1597 +	int ret = 0, i = 0;
1598 +	mutex_lock(&void_lockdown_proc);
1599 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1600 +	if (ret)
1601 +		goto out;
1602 +	if (write) {
1603 +            printk("New set Partition : \n");
1604 +	    for(i =0;i <9;i++)
1605 +            {
1606 +                set_index[i] = 0;
1607 +                printk("set[%d] = %x \n", i, set_partition[i]);
1608 +            }
1609 +	}
1610 +out:
1611 +	mutex_unlock(&void_lockdown_proc);
1612 +	return ret;
1613 +}
1614 +
1615 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1616 +		size_t *lenp, loff_t *ppos)
1617 +{
1618 +	int ret = 0, i = 0;
1619 +	mutex_lock(&void_lockdown_proc);
1620 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1621 +	if (ret)
1622 +		goto out;
1623 +	if (write) {
1624 +	    for(i =0;i <9;i++)
1625 +            {
1626 +                bank_index[i] = 0;
1627 +            }
1628 +	}
1629 +out:
1630 +	mutex_unlock(&void_lockdown_proc);
1631 +	return ret;
1632 +}
1633 +
1634 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1635 +		size_t *lenp, loff_t *ppos)
1636 +{
1637 +	int ret = 0;
1638 +	mutex_lock(&void_lockdown_proc);
1639 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1640 +	if (ret)
1641 +		goto out;
1642 +	if (write) {
1643 +            show_nr_pages();
1644 +	}
1645 +out:
1646 +	mutex_unlock(&void_lockdown_proc);
1647 +	return ret;
1648 +}
1649 +
1650 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1651 +		size_t *lenp, loff_t *ppos)
1652 +{
1653 +	int ret = 0;
1654 +	mutex_lock(&void_lockdown_proc);
1655 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1656 +	if (ret)
1657 +		goto out;
1658 +	if (write) {
1659 +            do_add_pages();
1660 +			show_nr_pages();
1661 +	}
1662 +out:
1663 +	mutex_unlock(&void_lockdown_proc);
1664 +	return ret;
1665 +}
1666 +
1667 +static struct ctl_table cache_table[] =
1668 +{
1669 +        
1670 +	{
1671 +		.procname	= "C0_LA_set",
1672 +		.mode		= 0666,
1673 +		.proc_handler	= set_partition_handler,
1674 +		.data		= &set_partition[0],
1675 +		.maxlen		= sizeof(set_partition[0]),
1676 +		.extra1		= &set_partition_min,
1677 +		.extra2		= &set_partition_max,
1678 +	},	
1679 +	{
1680 +		.procname	= "C0_LB_set",
1681 +		.mode		= 0666,
1682 +		.proc_handler	= set_partition_handler,
1683 +		.data		= &set_partition[1],
1684 +		.maxlen		= sizeof(set_partition[1]),
1685 +		.extra1		= &set_partition_min,
1686 +		.extra2		= &set_partition_max,
1687 +	},	
1688 +	{
1689 +		.procname	= "C1_LA_set",
1690 +		.mode		= 0666,
1691 +		.proc_handler	= set_partition_handler,
1692 +		.data		= &set_partition[2],
1693 +		.maxlen		= sizeof(set_partition[2]),
1694 +		.extra1		= &set_partition_min,
1695 +		.extra2		= &set_partition_max,
1696 +	},
1697 +	{
1698 +		.procname	= "C1_LB_set",
1699 +		.mode		= 0666,
1700 +		.proc_handler	= set_partition_handler,
1701 +		.data		= &set_partition[3],
1702 +		.maxlen		= sizeof(set_partition[3]),
1703 +		.extra1		= &set_partition_min,
1704 +		.extra2		= &set_partition_max,
1705 +	},
1706 +	{
1707 +		.procname	= "C2_LA_set",
1708 +		.mode		= 0666,
1709 +		.proc_handler	= set_partition_handler,
1710 +		.data		= &set_partition[4],
1711 +		.maxlen		= sizeof(set_partition[4]),
1712 +		.extra1		= &set_partition_min,
1713 +		.extra2		= &set_partition_max,
1714 +	},
1715 +	{
1716 +		.procname	= "C2_LB_set",
1717 +		.mode		= 0666,
1718 +		.proc_handler	= set_partition_handler,
1719 +		.data		= &set_partition[5],
1720 +		.maxlen		= sizeof(set_partition[5]),
1721 +		.extra1		= &set_partition_min,
1722 +		.extra2		= &set_partition_max,
1723 +	},
1724 +	{
1725 +		.procname	= "C3_LA_set",
1726 +		.mode		= 0666,
1727 +		.proc_handler	= set_partition_handler,
1728 +		.data		= &set_partition[6],
1729 +		.maxlen		= sizeof(set_partition[6]),
1730 +		.extra1		= &set_partition_min,
1731 +		.extra2		= &set_partition_max,
1732 +	},
1733 +	{
1734 +		.procname	= "C3_LB_set",
1735 +		.mode		= 0666,
1736 +		.proc_handler	= set_partition_handler,
1737 +		.data		= &set_partition[7],
1738 +		.maxlen		= sizeof(set_partition[7]),
1739 +		.extra1		= &set_partition_min,
1740 +		.extra2		= &set_partition_max,
1741 +	},	
1742 +	{
1743 +		.procname	= "Call_LC_set",
1744 +		.mode		= 0666,
1745 +		.proc_handler	= set_partition_handler,
1746 +		.data		= &set_partition[8],
1747 +		.maxlen		= sizeof(set_partition[8]),
1748 +		.extra1		= &set_partition_min,
1749 +		.extra2		= &set_partition_max,
1750 +	},	
1751 +	{
1752 +		.procname	= "C0_LA_bank",
1753 +		.mode		= 0666,
1754 +		.proc_handler	= bank_partition_handler,
1755 +		.data		= &bank_partition[0],
1756 +		.maxlen		= sizeof(bank_partition[0]),
1757 +		.extra1		= &bank_partition_min,
1758 +		.extra2		= &bank_partition_max,
1759 +	},
1760 +	{
1761 +		.procname	= "C0_LB_bank",
1762 +		.mode		= 0666,
1763 +		.proc_handler	= bank_partition_handler,
1764 +		.data		= &bank_partition[1],
1765 +		.maxlen		= sizeof(bank_partition[1]),
1766 +		.extra1		= &bank_partition_min,
1767 +		.extra2		= &bank_partition_max,
1768 +	},		
1769 +	{
1770 +		.procname	= "C1_LA_bank",
1771 +		.mode		= 0666,
1772 +		.proc_handler	= bank_partition_handler,
1773 +		.data		= &bank_partition[2],
1774 +		.maxlen		= sizeof(bank_partition[2]),
1775 +		.extra1		= &bank_partition_min,
1776 +		.extra2		= &bank_partition_max,
1777 +	},
1778 +	{
1779 +		.procname	= "C1_LB_bank",
1780 +		.mode		= 0666,
1781 +		.proc_handler	= bank_partition_handler,
1782 +		.data		= &bank_partition[3],
1783 +		.maxlen		= sizeof(bank_partition[3]),
1784 +		.extra1		= &bank_partition_min,
1785 +		.extra2		= &bank_partition_max,
1786 +	},
1787 +	{
1788 +		.procname	= "C2_LA_bank",
1789 +		.mode		= 0666,
1790 +		.proc_handler	= bank_partition_handler,
1791 +		.data		= &bank_partition[4],
1792 +		.maxlen		= sizeof(bank_partition[4]),
1793 +		.extra1		= &bank_partition_min,
1794 +		.extra2		= &bank_partition_max,
1795 +	},	
1796 +	{
1797 +		.procname	= "C2_LB_bank",
1798 +		.mode		= 0666,
1799 +		.proc_handler	= bank_partition_handler,
1800 +		.data		= &bank_partition[5],
1801 +		.maxlen		= sizeof(bank_partition[5]),
1802 +		.extra1		= &bank_partition_min,
1803 +		.extra2		= &bank_partition_max,
1804 +	},		
1805 +	{
1806 +		.procname	= "C3_LA_bank",
1807 +		.mode		= 0666,
1808 +		.proc_handler	= bank_partition_handler,
1809 +		.data		= &bank_partition[6],
1810 +		.maxlen		= sizeof(bank_partition[6]),
1811 +		.extra1		= &bank_partition_min,
1812 +		.extra2		= &bank_partition_max,
1813 +	},	
1814 +	{
1815 +		.procname	= "C3_LB_bank",
1816 +		.mode		= 0666,
1817 +		.proc_handler	= bank_partition_handler,
1818 +		.data		= &bank_partition[7],
1819 +		.maxlen		= sizeof(bank_partition[7]),
1820 +		.extra1		= &bank_partition_min,
1821 +		.extra2		= &bank_partition_max,
1822 +	},	
1823 +	{
1824 +		.procname	= "Call_LC_bank",
1825 +		.mode		= 0666,
1826 +		.proc_handler	= bank_partition_handler,
1827 +		.data		= &bank_partition[8],
1828 +		.maxlen		= sizeof(bank_partition[8]),
1829 +		.extra1		= &bank_partition_min,
1830 +		.extra2		= &bank_partition_max,
1831 +	},	
1832 +	{
1833 +		.procname	= "show_page_pool",
1834 +		.mode		= 0666,
1835 +		.proc_handler	= show_page_pool_handler,
1836 +		.data		= &show_page_pool,
1837 +		.maxlen		= sizeof(show_page_pool),
1838 +	},
     +	{
1839 +		.procname	= "refill_page_pool",
1840 +		.mode		= 0666,
1841 +		.proc_handler	= refill_page_pool_handler,
1842 +		.data		= &refill_page_pool,
1843 +		.maxlen		= sizeof(refill_page_pool),
1844 +	},	
1845 +	{ }
1846 +};
1847 +
1848 +static struct ctl_table litmus_dir_table[] = {
1849 +	{
1850 +		.procname	= "litmus",
1851 +		.mode		= 0555,
1852 +		.child		= cache_table,
1853 +	},
1854 +	{ }
1855 +};
1856 +
1857 +
1858 +static struct ctl_table_header *litmus_sysctls;
1859 +
1860 +
1861 +/*
1862 + * Initialize this proc interface.
1863 + */
1864 +static int __init litmus_color_init(void)
1865 +{
1866 +	int err = 0;
1867 +	printk(KERN_INFO "Init bankproc.c\n");
1868 +
1869 +	init_variables();
1870 +
1871 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
1872 +
1873 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
1874 +	if (!litmus_sysctls) {
1875 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
1876 +		err = -ENOMEM;
1877 +		goto out;
1878 +	}
1879 +
1880 +	init_color_groups();
1881 +	do_add_pages();
1882 +
1883 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
1884 +out:
1885 +	return err;
1886 +}
1887 +
1888 +module_init(litmus_color_init);
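     +
     +/*
     + * Usage sketch (an assumption, not part of this patch): once registered,
     + * the set/bank masks above appear under /proc/sys/litmus/ and can be
     + * driven from a shell, e.g.:
     + *
     + *   echo 3 > /proc/sys/litmus/C0_LA_set       # hypothetical mask value
     + *   echo 1 > /proc/sys/litmus/show_page_pool  # write -> show_nr_pages()
     + *   echo 1 > /proc/sys/litmus/refill_page_pool # write -> do_add_pages()
     + */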
1889 +
1890 diff --git litmus/budget.c litmus/budget.c
1891 index 47bf78a..d67f4b3 100644
1892 --- litmus/budget.c
1893 +++ litmus/budget.c
1894 @@ -1,9 +1,11 @@
1895  #include <linux/sched.h>
1896  #include <linux/percpu.h>
1897  #include <linux/hrtimer.h>
1898 +#include <linux/uaccess.h>
1899  
1900  #include <litmus/litmus.h>
1901  #include <litmus/preempt.h>
1902 +#include <litmus/sched_plugin.h>
1903  
1904  #include <litmus/budget.h>
1905  
1906 @@ -113,4 +115,54 @@ static int __init init_budget_enforcement(void)
1907  	return 0;
1908  }
1909  
1910 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining)
1911 +{
1912 +	struct task_struct *t = current;
1913 +	unsigned long flags;
1914 +	s64 delta;
1915 +
1916 +	local_irq_save(flags);
1917 +
1918 +	delta = sched_clock_cpu(smp_processor_id()) - t->se.exec_start;
1919 +	if (delta < 0)
1920 +		delta = 0;
1921 +
1922 +	TRACE_CUR("current_budget: sc:%llu start:%llu lt_t:%llu delta:%lld exec-time:%llu rem:%llu\n",
1923 +		sched_clock_cpu(smp_processor_id()), t->se.exec_start,
1924 +		litmus_clock(), delta,
1925 +		tsk_rt(t)->job_params.exec_time,
1926 +		budget_remaining(t));
1927 +
1928 +	if (used_so_far)
1929 +		*used_so_far = tsk_rt(t)->job_params.exec_time + delta;
1930 +
1931 +	if (remaining) {
1932 +		*remaining = budget_remaining(t);
1933 +		if (*remaining > delta)
1934 +			*remaining -= delta;
1935 +		else
1936 +			*remaining = 0;
1937 +	}
1938 +
1939 +	local_irq_restore(flags);
1940 +}
1941 +
1942 +asmlinkage long sys_get_current_budget(
1943 +	lt_t __user * _expended,
1944 +	lt_t __user *_remaining)
1945 +{
1946 +	lt_t expended = 0, remaining = 0;
1947 +
1948 +	if (is_realtime(current))
1949 +		litmus->current_budget(&expended, &remaining);
1950 +
1951 +	if (_expended && put_user(expended, _expended))
1952 +		return -EFAULT;
1953 +
1954 +	if (_remaining && put_user(remaining, _remaining))
1955 +		return -EFAULT;
1956 +
1957 +	return 0;
1958 +}
1959 +
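     +/*
     + * Userspace usage sketch (an assumption; liblitmus normally wraps the
     + * raw syscall):
     + *
     + *   lt_t used, left;
     + *   if (syscall(__NR_get_current_budget, &used, &left) == 0)
     + *       printf("used %llu ns, %llu ns left\n", used, left);
     + *
     + * Either pointer may be NULL if only one value is wanted; for a
     + * non-real-time caller both values are reported as 0.
     + */
     +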
1960  module_init(init_budget_enforcement);
1961 diff --git litmus/cache_proc.c litmus/cache_proc.c
1962 new file mode 100644
1963 index 0000000..5cdf953
1964 --- /dev/null
1965 +++ litmus/cache_proc.c
1966 @@ -0,0 +1,1414 @@
1967 +#include <asm/uaccess.h>
1968 +#include <linux/uaccess.h>
1969 +#include <linux/init.h>
1970 +#include <linux/types.h>
1971 +#include <linux/kernel.h>
1972 +#include <linux/module.h>
1973 +#include <linux/sysctl.h>
1974 +#include <linux/slab.h>
1975 +#include <linux/io.h>
1976 +#include <linux/mutex.h>
1977 +#include <linux/time.h>
1978 +#include <linux/random.h>
1979 +
1980 +#include <litmus/litmus_proc.h>
1981 +#include <litmus/sched_trace.h>
1982 +#include <litmus/cache_proc.h>
1983 +#include <litmus/mc2_common.h>
1984 +#include <litmus/litmus.h>
1985 +
1986 +#include <asm/hardware/cache-l2x0.h>
1987 +#include <asm/cacheflush.h>
1988 +
1989 +
1990 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
1991 +#define LOCK_ALL        (~UNLOCK_ALL)
1992 +#define MAX_NR_WAYS	16
1993 +#define MAX_NR_COLORS	16
1994 +#define CACHELINE_SIZE 32
1995 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
1996 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
1997 +
1998 +typedef struct cacheline
1999 +{
2000 +        int line[INTS_IN_CACHELINE];
2001 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
2002 +
2003 +void mem_lock(u32 lock_val, int cpu);
2004 +
2005 +/*
2006 + * unlocked_way[i] : allocation can occur in way i
2007 + *
2008 + * 0 = allocation can occur in the corresponding way
2009 + * 1 = allocation cannot occur in the corresponding way
2010 + */
2011 +u32 unlocked_way[MAX_NR_WAYS]  = {
2012 +	0xFFFFFFFE, /* way 0 unlocked */
2013 +	0xFFFFFFFD,
2014 +	0xFFFFFFFB,
2015 +	0xFFFFFFF7,
2016 +	0xFFFFFFEF, /* way 4 unlocked */
2017 +	0xFFFFFFDF,
2018 +	0xFFFFFFBF,
2019 +	0xFFFFFF7F,
2020 +	0xFFFFFEFF, /* way 8 unlocked */
2021 +	0xFFFFFDFF,
2022 +	0xFFFFFBFF,
2023 +	0xFFFFF7FF,
2024 +	0xFFFFEFFF, /* way 12 unlocked */
2025 +	0xFFFFDFFF,
2026 +	0xFFFFBFFF,
2027 +	0xFFFF7FFF,
2028 +};
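     +
     +/*
     + * Each entry above is simply ~(1 << way): every lockdown bit set except
     + * the single way being made available.  A minimal runtime sketch of the
     + * same mask (unlock_way_mask() is illustrative, not used elsewhere):
     + *
     + *   static inline u32 unlock_way_mask(int way) { return ~(1u << way); }
     + */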
2029 +
2030 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
2031 +	0x0000FFFF, /* all ways are locked. usable = 0 */
2032 +	0x0000FFFE, /* way 0 unlocked. usable = 1 */
2033 +	0x0000FFFC,
2034 +	0x0000FFF8,
2035 +	0x0000FFF0,
2036 +	0x0000FFE0,
2037 +	0x0000FFC0,
2038 +	0x0000FF80,
2039 +	0x0000FF00,
2040 +	0x0000FE00,
2041 +	0x0000FC00,
2042 +	0x0000F800,
2043 +	0x0000F000,
2044 +	0x0000E000,
2045 +	0x0000C000,
2046 +	0x00008000,
2047 +	0x00000000, /* ways 0-15 unlocked. usable = 16 */
2048 +};
2049 +
2050 +u32 way_partition[4] = {
2051 +	0xfffffff0, /* cpu0 */
2052 +	0xffffff0f, /* cpu1 */
2053 +	0xfffff0ff, /* cpu2 */
2054 +	0xffff0fff, /* cpu3 */
2055 +};
2056 +
2057 +u32 way_partitions[9] = {
2058 +	0xffff0003, /* cpu0 A */
2059 +	0xffff0003, /* cpu0 B */
2060 +	0xffff000C, /* cpu1 A */
2061 +	0xffff000C, /* cpu1 B */
2062 +	0xffff0030, /* cpu2 A */
2063 +	0xffff0030, /* cpu2 B */
2064 +	0xffff00C0, /* cpu3 A */
2065 +	0xffff00C0, /* cpu3 B */
2066 +	0xffffff00, /* lv C */
2067 +};
2068 +
2069 +u32 prev_lockdown_d_reg[5] = {
2070 +	0x0000FF00,
2071 +	0x0000FF00,
2072 +	0x0000FF00,
2073 +	0x0000FF00,
2074 +	0x000000FF, /* share with level-C */
2075 +};
2076 +
2077 +u32 prev_lockdown_i_reg[5] = {
2078 +	0x0000FF00,
2079 +	0x0000FF00,
2080 +	0x0000FF00,
2081 +	0x0000FF00,
2082 +	0x000000FF, /* share with level-C */
2083 +};
2084 +
2085 +u32 prev_lbm_i_reg[8] = {
2086 +	0x00000000,
2087 +	0x00000000,
2088 +	0x00000000,
2089 +	0x00000000,
2090 +	0x00000000,
2091 +	0x00000000,
2092 +	0x00000000,
2093 +	0x00000000,
2094 +};
2095 +
2096 +u32 prev_lbm_d_reg[8] = {
2097 +	0x00000000,
2098 +	0x00000000,
2099 +	0x00000000,
2100 +	0x00000000,
2101 +	0x00000000,
2102 +	0x00000000,
2103 +	0x00000000,
2104 +	0x00000000,
2105 +};
2106 +
2107 +static void __iomem *cache_base;
2108 +static void __iomem *lockreg_d;
2109 +static void __iomem *lockreg_i;
2110 +
2111 +static u32 cache_id;
2112 +
2113 +struct mutex actlr_mutex;
2114 +struct mutex l2x0_prefetch_mutex;
2115 +struct mutex lockdown_proc;
2116 +static u32 way_partition_min;
2117 +static u32 way_partition_max;
2118 +
2119 +static int zero = 0;
2120 +static int one = 1;
2121 +
2122 +static int l1_prefetch_proc;
2123 +static int l2_prefetch_hint_proc;
2124 +static int l2_double_linefill_proc;
2125 +static int l2_data_prefetch_proc;
2126 +static int os_isolation;
2127 +static int use_part;
2128 +
2129 +u32 lockdown_reg[9] = {
2130 +	0x00000000,
2131 +	0x00000000,
2132 +	0x00000000,
2133 +	0x00000000,
2134 +	0x00000000,
2135 +	0x00000000,
2136 +	0x00000000,
2137 +	0x00000000,
2138 +};
2139 +	
2140 +
2141 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
2142 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
2143 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2144 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
2145 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
2146 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2147 +
2148 +int lock_all;
2149 +int nr_lockregs;
2150 +static raw_spinlock_t cache_lock;
2151 +static raw_spinlock_t prefetch_lock;
2152 +static void ***flusher_pages = NULL;
2153 +
2154 +extern void l2c310_flush_all(void);
2155 +
2156 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
2157 +{
2158 +	/* wait for cache operation by line or way to complete */
2159 +	while (readl_relaxed(reg) & mask)
2160 +		cpu_relax();
2161 +}
2162 +
2163 +#ifdef CONFIG_CACHE_L2X0
2164 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
2165 +{
2166 +	/* cache operations by line are atomic on PL310 */
2167 +}
2168 +#else
2169 +#define cache_wait	cache_wait_way
2170 +#endif
2171 +
2172 +static inline void cache_sync(void)
2173 +{
2174 +	void __iomem *base = cache_base;
2175 +
2176 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
2177 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
2178 +}
2179 +
2180 +static void print_lockdown_registers(int cpu)
2181 +{
2182 +	int i;
2183 +	/* print only the per-CPU (first four) lockdown register pairs */
2184 +	for (i = 0; i < 4; i++) {
2185 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
2186 +				i, readl_relaxed(ld_d_reg(i)));
2187 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
2188 +				i, readl_relaxed(ld_i_reg(i)));
2189 +	}
2190 +}
2191 +
2192 +static void test_lockdown(void *ignore)
2193 +{
2194 +	int i, cpu;
2195 +
2196 +	cpu = smp_processor_id();
2197 +	printk("Start lockdown test on CPU %d.\n", cpu);
2198 +
2199 +	for (i = 0; i < nr_lockregs; i++) {
2200 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
2201 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
2202 +	}
2203 +
2204 +	printk("Lockdown initial state:\n");
2205 +	print_lockdown_registers(cpu);
2206 +	printk("---\n");
2207 +
2208 +	for (i = 0; i < nr_lockregs; i++) {
2209 +		writel_relaxed(1, ld_d_reg(i));
2210 +		writel_relaxed(2, ld_i_reg(i));
2211 +	}
2212 +	printk("Lockdown all data=1 instr=2:\n");
2213 +	print_lockdown_registers(cpu);
2214 +	printk("---\n");
2215 +
2216 +	for (i = 0; i < nr_lockregs; i++) {
2217 +		writel_relaxed((1 << i), ld_d_reg(i));
2218 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
2219 +	}
2220 +	printk("Lockdown varies:\n");
2221 +	print_lockdown_registers(cpu);
2222 +	printk("---\n");
2223 +
2224 +	for (i = 0; i < nr_lockregs; i++) {
2225 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
2226 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
2227 +	}
2228 +	printk("Lockdown all zero:\n");
2229 +	print_lockdown_registers(cpu);
2230 +
2231 +	printk("End lockdown test.\n");
2232 +}
2233 +
2234 +void litmus_setup_lockdown(void __iomem *base, u32 id)
2235 +{
2236 +	cache_base = base;
2237 +	cache_id = id;
2238 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
2239 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
2240 +    
2241 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
2242 +		nr_lockregs = 8;
2243 +	} else {
2244 +		printk(KERN_WARNING "Unknown cache ID!\n");
2245 +		nr_lockregs = 1;
2246 +	}
2247 +	
2248 +	mutex_init(&actlr_mutex);
2249 +	mutex_init(&l2x0_prefetch_mutex);
2250 +	mutex_init(&lockdown_proc);
2251 +	raw_spin_lock_init(&cache_lock);
2252 +	raw_spin_lock_init(&prefetch_lock);
2253 +	
2254 +	test_lockdown(NULL);
2255 +}
2256 +
2257 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2258 +		size_t *lenp, loff_t *ppos)
2259 +{
2260 +	int ret = 0, i;
2261 +	unsigned long flags;
2262 +	
2263 +	mutex_lock(&lockdown_proc);
2264 +	
2265 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2266 +	if (ret)
2267 +		goto out;
2268 +	
2269 +	if (write) {
2270 +		printk("Way-partition settings:\n");
2271 +		for (i = 0; i < 9; i++) {
2272 +			printk("0x%08X\n", way_partitions[i]);
2273 +		}
2274 +		for (i = 0; i < 4; i++) {
2275 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2276 +				       i * L2X0_LOCKDOWN_STRIDE);
2277 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2278 +				       i * L2X0_LOCKDOWN_STRIDE);
2279 +		}
2280 +	}
2281 +	
2282 +	local_irq_save(flags);
2283 +	print_lockdown_registers(smp_processor_id());
2284 +	l2c310_flush_all();
2285 +	local_irq_restore(flags);
2286 +out:
2287 +	mutex_unlock(&lockdown_proc);
2288 +	return ret;
2289 +}
2290 +
2291 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
2292 +		size_t *lenp, loff_t *ppos)
2293 +{
2294 +	int ret = 0, i;
2295 +	unsigned long flags;
2296 +	
2297 +	mutex_lock(&lockdown_proc);
2298 +	
2299 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2300 +	if (ret)
2301 +		goto out;
2302 +	
2303 +	if (write && lock_all == 1) {
2304 +		for (i = 0; i < nr_lockregs; i++) {
2305 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2306 +				       i * L2X0_LOCKDOWN_STRIDE);
2307 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2308 +				       i * L2X0_LOCKDOWN_STRIDE);
2309 +		}
2310 +/*		
2311 +		for (i = 0; i < nr_lockregs;  i++) {
2312 +			barrier();
2313 +			mem_lock(LOCK_ALL, i);
2314 +			barrier();
2315 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
2316 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
2317 +		}
2318 +*/		
2319 +	}
2320 +	if (write && lock_all == 0) {
2321 +		for (i = 0; i < nr_lockregs; i++) {
2322 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2323 +				       i * L2X0_LOCKDOWN_STRIDE);
2324 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2325 +				       i * L2X0_LOCKDOWN_STRIDE);
2326 +		}
2327 +/*
2328 +		for (i = 0; i < nr_lockregs;  i++) {
2329 +			barrier();
2330 +			mem_lock(UNLOCK_ALL, i);
2331 +			barrier();
2332 +			//writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
2333 +			//writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
2334 +		}
2335 +*/
2336 +	}
2337 +	printk("LOCK_ALL HANDLER\n");
2338 +	local_irq_save(flags);
2339 +	print_lockdown_registers(smp_processor_id());
2340 +	l2c310_flush_all();
2341 +	local_irq_restore(flags);
2342 +out:
2343 +	mutex_unlock(&lockdown_proc);
2344 +	return ret;
2345 +}
2346 +
2347 +void cache_lockdown(u32 lock_val, int cpu)
2348 +{
2349 +	//unsigned long flags;
2350 +	//raw_spin_lock_irqsave(&cache_lock, flags);
2351 +
2352 +	__asm__ __volatile__ (
2353 +"	str	%[lockval], [%[dcachereg]]\n"
2354 +"	str	%[lockval], [%[icachereg]]\n"
2355 +	: 
2356 +	: [dcachereg] "r" (ld_d_reg(cpu)),
2357 +	  [icachereg] "r" (ld_i_reg(cpu)),
2358 +	  [lockval] "r" (lock_val)
2359 +	: "cc");
2360 +
2361 +	//raw_spin_unlock_irqrestore(&cache_lock, flags);
2362 +}
2363 +
2364 +void do_partition(enum crit_level lv, int cpu)
2365 +{
2366 +	u32 regs;
2367 +	unsigned long flags;
2368 +	
2369 +	if (lock_all || !use_part)
2370 +		return;
2371 +	raw_spin_lock_irqsave(&cache_lock, flags);
2372 +	switch(lv) {
2373 +		case CRIT_LEVEL_A:
2374 +			regs = ~way_partitions[cpu*2];
2375 +			regs &= 0x0000ffff;
2376 +			break;
2377 +		case CRIT_LEVEL_B:
2378 +			regs = ~way_partitions[cpu*2+1];
2379 +			regs &= 0x0000ffff;
2380 +			break;
2381 +		case CRIT_LEVEL_C:
2382 +		case NUM_CRIT_LEVELS:
2383 +			regs = ~way_partitions[8];
2384 +			regs &= 0x0000ffff;
2385 +			break;
2386 +		case MODE_POLL_TASK:
2387 +			regs = 0x0000ffff;
2388 +			break;
2389 +		default:
2390 +			BUG();
2391 +
2392 +	}
2393 +	barrier();
2394 +	//cache_lockdown(regs, cpu);
2395 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2396 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2397 +	barrier();
2398 +
2399 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2400 +	
2401 +	//flush_cache(0);
2402 +}
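     +
     +/*
     + * Worked example: for cpu 0 at CRIT_LEVEL_A, way_partitions[0] is
     + * 0xffff0003, so regs = ~0xffff0003 & 0x0000ffff = 0x0000fffc -- all
     + * ways locked except ways 0 and 1.  Level-A work on cpu 0 may therefore
     + * only allocate into its two dedicated ways.
     + */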
2403 +
2404 +void lock_cache(int cpu, u32 val)
2405 +{
2406 +	unsigned long flags;
2407 +	
2408 +	local_irq_save(flags);
2409 +	if (val != 0xffffffff) {
2410 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2411 +					   cpu * L2X0_LOCKDOWN_STRIDE);
2412 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2413 +					   cpu * L2X0_LOCKDOWN_STRIDE);
2414 +	}
2415 +	else {
2416 +		int i;
2417 +		for (i = 0; i < 4; i++)
2418 +			do_partition(CRIT_LEVEL_A, i);
2419 +	}
2420 +	local_irq_restore(flags);
2421 +}
2422 +
2423 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2424 +		size_t *lenp, loff_t *ppos)
2425 +{
2426 +	int ret = 0;
2427 +	
2428 +	mutex_lock(&lockdown_proc);
2429 +
2430 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2431 +	if (ret)
2432 +		goto out;
2433 +	
2434 +
2435 +	printk("USE_PART HANDLER = %d\n", use_part);
2436 +
2437 +out:
2438 +	mutex_unlock(&lockdown_proc);
2439 +	return ret;
2440 +}
2441 +
2442 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2443 +		size_t *lenp, loff_t *ppos)
2444 +{
2445 +	int ret = 0;
2446 +	
2447 +	mutex_lock(&lockdown_proc);
2448 +	
2449 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2450 +	if (ret)
2451 +		goto out;
2452 +	
2453 +
2454 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
2455 +
2456 +out:
2457 +	mutex_unlock(&lockdown_proc);
2458 +	return ret;
2459 +}
2460 +
2461 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
2462 +		size_t *lenp, loff_t *ppos)
2463 +{
2464 +	int ret = 0, i;
2465 +	
2466 +	mutex_lock(&lockdown_proc);
2467 +	
2468 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2469 +	if (ret)
2470 +		goto out;
2471 +	
2472 +	if (write) {
2473 +		for (i = 0; i < nr_lockregs; i++) {
2474 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2475 +				       i * L2X0_LOCKDOWN_STRIDE);
2476 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2477 +				       i * L2X0_LOCKDOWN_STRIDE);
2478 +		}
2479 +	}
2480 +
2481 +out:
2482 +	mutex_unlock(&lockdown_proc);
2483 +	return ret;
2484 +}
2485 +
2486 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
2487 +		size_t *lenp, loff_t *ppos)
2488 +{
2489 +	int ret = 0, i;
2490 +	
2491 +	mutex_lock(&lockdown_proc);
2492 +	
2493 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2494 +	if (ret)
2495 +		goto out;
2496 +	
2497 +	if (write) {
2498 +		for (i = 0; i < nr_lockregs; i++) {
2499 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2500 +				       i * L2X0_LOCKDOWN_STRIDE);
2501 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2502 +				       i * L2X0_LOCKDOWN_STRIDE);
2503 +		}
2504 +	}
2505 +
2506 +out:
2507 +	mutex_unlock(&lockdown_proc);
2508 +	return ret;
2509 +}
2510 +
2511 +inline void enter_irq_mode(void)
2512 +{
2513 +	int cpu = smp_processor_id();
2514 +
2515 +	if (os_isolation == 0)
2516 +		return;	
2517 +
2518 +	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2519 +	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2520 +	
2521 +	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
2522 +	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
2523 +}
2524 +
2525 +inline void exit_irq_mode(void)
2526 +{
2527 +	int cpu = smp_processor_id();
2528 +
2529 +	if (os_isolation == 0)
2530 +		return;
2531 +	writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
2532 +	writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));	
2533 +}
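     +
     +/*
     + * enter_irq_mode()/exit_irq_mode() are presumably hooked into the
     + * interrupt entry/exit path elsewhere in this patch: entry saves the
     + * current per-CPU lockdown masks and restricts the CPU to the shared
     + * level-C ways (way_partitions[8]) so that, with os_isolation enabled,
     + * interrupt handling cannot evict level-A/B cache contents; exit
     + * restores the saved masks.
     + */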
2534 +
2535 +/* Operate on the Cortex-A9's ACTLR register */
2536 +#define ACTLR_L2_PREFETCH_HINT	(1 << 1)
2537 +#define ACTLR_L1_PREFETCH	(1 << 2)
2538 +
2539 +/*
2540 + * Change the ACTLR.
2541 + * @mode	- If 1 (0), set (clear) the bit given in @mask in the ACTLR.
2542 + * @mask	- A mask in which one bit is set to operate on the ACTLR.
2543 + */
2544 +static void actlr_change(int mode, int mask)
2545 +{
2546 +	u32 orig_value, new_value, reread_value;
2547 +
2548 +	if (0 != mode && 1 != mode) {
2549 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2550 +				__func__);
2551 +		return;
2552 +	}
2553 +
2554 +	/* get the original value */
2555 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
2556 +
2557 +	if (0 == mode)
2558 +		new_value = orig_value & ~(mask);
2559 +	else
2560 +		new_value = orig_value | mask;
2561 +
2562 +	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
2563 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
2564 +
2565 +	printk("ACTLR: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2566 +			orig_value, new_value, reread_value);
2567 +}
2568 +
2569 +int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
2570 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2571 +{
2572 +	int ret, mode;
2573 +
2574 +	mutex_lock(&actlr_mutex);
2575 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2576 +
2577 +	if (!ret && write) {
2578 +		mode = *((int*)table->data);
2579 +		actlr_change(mode, ACTLR_L1_PREFETCH);
2580 +	}
2581 +	mutex_unlock(&actlr_mutex);
2582 +
2583 +	return ret;
2584 +}
2585 +
2586 +int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
2587 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2588 +{
2589 +	int ret, mode;
2590 +
2591 +	mutex_lock(&actlr_mutex);
2592 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2593 +	if (!ret && write) {
2594 +		mode = *((int*)table->data);
2595 +		actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
2596 +	}
2597 +	mutex_unlock(&actlr_mutex);
2598 +
2599 +	return ret;
2600 +}
2601 +
2602 +
2603 +/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
2604 +#define L2X0_PREFETCH_DOUBLE_LINEFILL	(1 << 30)
2605 +#define L2X0_PREFETCH_INST_PREFETCH	(1 << 29)
2606 +#define L2X0_PREFETCH_DATA_PREFETCH	(1 << 28)
2607 +static void l2x0_prefetch_change(int mode, int mask)
2608 +{
2609 +	u32 orig_value, new_value, reread_value;
2610 +
2611 +	if (0 != mode && 1 != mode) {
2612 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2613 +				__func__);
2614 +		return;
2615 +	}
2616 +
2617 +	orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2618 +
2619 +	if (0 == mode)
2620 +		new_value = orig_value & ~(mask);
2621 +	else
2622 +		new_value = orig_value | mask;
2623 +
2624 +	writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
2625 +	reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2626 +
2627 +	printk("l2x0 prefetch: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2628 +			orig_value, new_value, reread_value);
2629 +}
2630 +
2631 +int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
2632 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2633 +{
2634 +	int ret, mode;
2635 +
2636 +	mutex_lock(&l2x0_prefetch_mutex);
2637 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2638 +	if (!ret && write) {
2639 +		mode = *((int*)table->data);
2640 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
2641 +	}
2642 +	mutex_unlock(&l2x0_prefetch_mutex);
2643 +
2644 +	return ret;
2645 +}
2646 +
2647 +int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
2648 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2649 +{
2650 +	int ret, mode;
2651 +
2652 +	mutex_lock(&l2x0_prefetch_mutex);
2653 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2654 +	if (!ret && write) {
2655 +		mode = *((int*)table->data);
2656 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
2657 +	}
2658 +	mutex_unlock(&l2x0_prefetch_mutex);
2659 +
2660 +	return ret;
2661 +}
2662 +
2663 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2664 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2665 +
2666 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2667 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2668 +		
2669 +static struct ctl_table cache_table[] =
2670 +{
2671 +	{
2672 +		.procname	= "C0_LA_way",
2673 +		.mode		= 0666,
2674 +		.proc_handler	= way_partition_handler,
2675 +		.data		= &way_partitions[0],
2676 +		.maxlen		= sizeof(way_partitions[0]),
2677 +		.extra1		= &way_partition_min,
2678 +		.extra2		= &way_partition_max,
2679 +	},	
2680 +	{
2681 +		.procname	= "C0_LB_way",
2682 +		.mode		= 0666,
2683 +		.proc_handler	= way_partition_handler,
2684 +		.data		= &way_partitions[1],
2685 +		.maxlen		= sizeof(way_partitions[1]),
2686 +		.extra1		= &way_partition_min,
2687 +		.extra2		= &way_partition_max,
2688 +	},	
2689 +	{
2690 +		.procname	= "C1_LA_way",
2691 +		.mode		= 0666,
2692 +		.proc_handler	= way_partition_handler,
2693 +		.data		= &way_partitions[2],
2694 +		.maxlen		= sizeof(way_partitions[2]),
2695 +		.extra1		= &way_partition_min,
2696 +		.extra2		= &way_partition_max,
2697 +	},
2698 +	{
2699 +		.procname	= "C1_LB_way",
2700 +		.mode		= 0666,
2701 +		.proc_handler	= way_partition_handler,
2702 +		.data		= &way_partitions[3],
2703 +		.maxlen		= sizeof(way_partitions[3]),
2704 +		.extra1		= &way_partition_min,
2705 +		.extra2		= &way_partition_max,
2706 +	},
2707 +	{
2708 +		.procname	= "C2_LA_way",
2709 +		.mode		= 0666,
2710 +		.proc_handler	= way_partition_handler,
2711 +		.data		= &way_partitions[4],
2712 +		.maxlen		= sizeof(way_partitions[4]),
2713 +		.extra1		= &way_partition_min,
2714 +		.extra2		= &way_partition_max,
2715 +	},
2716 +	{
2717 +		.procname	= "C2_LB_way",
2718 +		.mode		= 0666,
2719 +		.proc_handler	= way_partition_handler,
2720 +		.data		= &way_partitions[5],
2721 +		.maxlen		= sizeof(way_partitions[5]),
2722 +		.extra1		= &way_partition_min,
2723 +		.extra2		= &way_partition_max,
2724 +	},
2725 +	{
2726 +		.procname	= "C3_LA_way",
2727 +		.mode		= 0666,
2728 +		.proc_handler	= way_partition_handler,
2729 +		.data		= &way_partitions[6],
2730 +		.maxlen		= sizeof(way_partitions[6]),
2731 +		.extra1		= &way_partition_min,
2732 +		.extra2		= &way_partition_max,
2733 +	},
2734 +	{
2735 +		.procname	= "C3_LB_way",
2736 +		.mode		= 0666,
2737 +		.proc_handler	= way_partition_handler,
2738 +		.data		= &way_partitions[7],
2739 +		.maxlen		= sizeof(way_partitions[7]),
2740 +		.extra1		= &way_partition_min,
2741 +		.extra2		= &way_partition_max,
2742 +	},	
2743 +	{
2744 +		.procname	= "Call_LC_way",
2745 +		.mode		= 0666,
2746 +		.proc_handler	= way_partition_handler,
2747 +		.data		= &way_partitions[8],
2748 +		.maxlen		= sizeof(way_partitions[8]),
2749 +		.extra1		= &way_partition_min,
2750 +		.extra2		= &way_partition_max,
2751 +	},		
2752 +	{
2753 +		.procname	= "lock_all",
2754 +		.mode		= 0666,
2755 +		.proc_handler	= lock_all_handler,
2756 +		.data		= &lock_all,
2757 +		.maxlen		= sizeof(lock_all),
2758 +		.extra1		= &zero,
2759 +		.extra2		= &one,
2760 +	},
2761 +	{
2762 +		.procname	= "l1_prefetch",
2763 +		.mode		= 0644,
2764 +		.proc_handler	= litmus_l1_prefetch_proc_handler,
2765 +		.data		= &l1_prefetch_proc,
2766 +		.maxlen		= sizeof(l1_prefetch_proc),
2767 +	},
2768 +	{
2769 +		.procname	= "l2_prefetch_hint",
2770 +		.mode		= 0644,
2771 +		.proc_handler	= litmus_l2_prefetch_hint_proc_handler,
2772 +		.data		= &l2_prefetch_hint_proc,
2773 +		.maxlen		= sizeof(l2_prefetch_hint_proc),
2774 +	},
2775 +	{
2776 +		.procname	= "l2_double_linefill",
2777 +		.mode		= 0644,
2778 +		.proc_handler	= litmus_l2_double_linefill_proc_handler,
2779 +		.data		= &l2_double_linefill_proc,
2780 +		.maxlen		= sizeof(l2_double_linefill_proc),
2781 +	},
2782 +	{
2783 +		.procname	= "l2_data_prefetch",
2784 +		.mode		= 0644,
2785 +		.proc_handler	= litmus_l2_data_prefetch_proc_handler,
2786 +		.data		= &l2_data_prefetch_proc,
2787 +		.maxlen		= sizeof(l2_data_prefetch_proc),
2788 +	},
2789 +	{
2790 +		.procname	= "os_isolation",
2791 +		.mode		= 0644,
2792 +		.proc_handler	= os_isolation_proc_handler,
2793 +		.data		= &os_isolation,
2794 +		.maxlen		= sizeof(os_isolation),
2795 +	},
2796 +	{
2797 +		.procname	= "use_part",
2798 +		.mode		= 0644,
2799 +		.proc_handler	= use_part_proc_handler,
2800 +		.data		= &use_part,
2801 +		.maxlen		= sizeof(use_part),
2802 +	},
2803 +	{
2804 +		.procname	= "do_perf_test",
2805 +		.mode		= 0644,
2806 +		.proc_handler	= do_perf_test_proc_handler,
2807 +	},
2808 +	{
2809 +		.procname	= "setup_flusher",
2810 +		.mode		= 0644,
2811 +		.proc_handler	= setup_flusher_proc_handler,
2812 +	},
2813 +	{
2814 +		.procname	= "lockdown_reg_0",
2815 +		.mode		= 0644,
2816 +		.proc_handler	= lockdown_reg_handler,
2817 +		.data		= &lockdown_reg[0],
2818 +		.maxlen		= sizeof(lockdown_reg[0]),
2819 +		.extra1		= &way_partition_min,
2820 +		.extra2		= &way_partition_max,
2821 +	},
2822 +	{
2823 +		.procname	= "lockdown_reg_1",
2824 +		.mode		= 0644,
2825 +		.proc_handler	= lockdown_reg_handler,
2826 +		.data		= &lockdown_reg[1],
2827 +		.maxlen		= sizeof(lockdown_reg[1]),
2828 +		.extra1		= &way_partition_min,
2829 +		.extra2		= &way_partition_max,
2830 +	},
2831 +	{
2832 +		.procname	= "lockdown_reg_2",
2833 +		.mode		= 0644,
2834 +		.proc_handler	= lockdown_reg_handler,
2835 +		.data		= &lockdown_reg[2],
2836 +		.maxlen		= sizeof(lockdown_reg[2]),
2837 +		.extra1		= &way_partition_min,
2838 +		.extra2		= &way_partition_max,
2839 +	},
2840 +	{
2841 +		.procname	= "lockdown_reg_3",
2842 +		.mode		= 0644,
2843 +		.proc_handler	= lockdown_reg_handler,
2844 +		.data		= &lockdown_reg[3],
2845 +		.maxlen		= sizeof(lockdown_reg[3]),
2846 +		.extra1		= &way_partition_min,
2847 +		.extra2		= &way_partition_max,
2848 +	},
2849 +	{
2850 +		.procname	= "lockdown_regs",
2851 +		.mode		= 0644,
2852 +		.proc_handler	= lockdown_global_handler,
2853 +		.data		= &lockdown_reg[8],
2854 +		.maxlen		= sizeof(lockdown_reg[8]),
2855 +		.extra1		= &way_partition_min,
2856 +		.extra2		= &way_partition_max,
2857 +	},
2858 +	{ }
2859 +};
2860 +
2861 +static struct ctl_table litmus_dir_table[] = {
2862 +	{
2863 +		.procname	= "litmus",
2864 +		.mode		= 0555,
2865 +		.child		= cache_table,
2866 +	},
2867 +	{ }
2868 +};
2869 +
2870 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
2871 +{
2872 +	u32 v = 0;
2873 +
2874 +	__asm__ __volatile__ (
2875 +"	.align 5\n"
2876 +"	str	%[lockval], [%[cachereg]]\n"
2877 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2878 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2879 +"	bgt	1b\n				@ read more, if necessary\n"
2880 +	: [addr] "+r" (start),
2881 +	  [val] "+r" (v)
2882 +	: [end] "r" (end),
2883 +#ifdef CONFIG_CACHE_L2X0
2884 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2885 +#else
2886 +	  [cachereg] "r" (lockreg_d),
2887 +#endif
2888 +	  [lockval] "r" (lock_val)
2889 +	: "cc");
2890 +
2891 +	return v;
2892 +}
2893 +
2894 +
2895 +/*
2896 + * Prefetch by reading the first word of each cache line in a page.
2897 + *
2898 + * @lockdown_reg: address of the lockdown register to write
2899 + * @lock_val: value to be written to @lockdown_reg
2900 + * @unlock_val: will unlock the cache to this value
2901 + * @addr: start address to be prefetched
2902 + * @end_addr: end address to prefetch (exclusive)
2903 + *
2904 + * Assumes: addr < end_addr AND addr != end_addr
2905 + */
2906 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
2907 +{
2908 +#ifndef CONFIG_CACHE_L2X0
2909 +	unsigned long flags;
2910 +#endif
2911 +	u32 v = 0;
2912 +
2913 +#ifndef CONFIG_CACHE_L2X0
2914 +	raw_spin_lock_irqsave(&prefetch_lock, flags);
2915 +#endif
2916 +
2917 +	__asm__ __volatile__ (
2918 +"	.align 5\n"
2919 +"	str	%[lockval], [%[cachereg]]\n"
2920 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2921 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2922 +"	bgt	1b\n				@ read more, if necessary\n"
2923 +"	str	%[unlockval], [%[cachereg]]\n"
2924 +	: [addr] "+r" (start),
2925 +	  [val] "+r" (v)
2926 +	: [end] "r" (end),
2927 +#ifdef CONFIG_CACHE_L2X0
2928 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2929 +#else
2930 +	  [cachereg] "r" (lockreg_d),
2931 +#endif
2932 +	  [lockval] "r" (lock_val),
2933 +	  [unlockval] "r" (unlock_val)
2934 +	: "cc");
2935 +
2936 +#ifndef CONFIG_CACHE_L2X0
2937 +	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
2938 +#endif
2939 +
2940 +	return v;
2941 +}
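     +
     +/*
     + * Usage sketch (an assumption): to preload a page into cache way w on
     + * the current CPU and then reopen all ways for normal allocation:
     + *
     + *   void *p = page_address(page);
     + *   color_read_in_mem_lock(unlocked_way[w], UNLOCK_ALL, p, p + PAGE_SIZE);
     + *
     + * Allocations during the read can only go to way w; the trailing store
     + * of UNLOCK_ALL unlocks every way again.
     + */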
2942 +
2943 +static long update_timeval(struct timespec lhs, struct timespec rhs)
2944 +{
2945 +	long val;
2946 +	struct timespec ts;
2947 +
2948 +	ts = timespec_sub(rhs, lhs);
2949 +	val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
2950 +
2951 +	return val;
2952 +}
2953 +
2954 +extern void v7_flush_kern_dcache_area(void *, size_t);
2955 +extern void v7_flush_kern_cache_all(void);
2956 +/*
2957 + * Ensure that this page is not in the L1 or L2 cache.
2958 + * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
2959 + * kernel or user vaddr.
2960 + */
2961 +void color_flush_page(void *vaddr, size_t size)
2962 +{
2963 +	v7_flush_kern_dcache_area(vaddr, size);
2964 +	//v7_flush_kern_cache_all();
2965 +}
2966 +
2967 +extern struct page* get_colored_page(unsigned long color);
2968 +
2969 +int setup_flusher_array(void)
2970 +{
2971 +	int color, way, ret = 0;
2972 +	struct page *page;
2973 +
2974 +	if (flusher_pages != NULL)
2975 +		goto out;
2976 +
2977 +	flusher_pages = (void***) kmalloc(MAX_NR_WAYS
2978 +			* sizeof(*flusher_pages), GFP_KERNEL);
2979 +	if (!flusher_pages) {
2980 +		printk(KERN_WARNING "No memory for flusher array!\n");
2981 +		ret = -EINVAL;
2982 +		goto out;
2983 +	}
2984 +
2985 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2986 +		void **flusher_color_arr;
2987 +		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
2988 +				* MAX_NR_COLORS, GFP_KERNEL);
2989 +		if (!flusher_color_arr) {
2990 +			printk(KERN_WARNING "No memory for flusher array!\n");
2991 +			ret = -ENOMEM;
2992 +			goto out_free;
2993 +		}
2994 +
2995 +		flusher_pages[way] = flusher_color_arr;
2996 +
2997 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2998 +			int node;
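     +			/* color -> colored-page node id; the constants below
     +			 * appear board-specific (assumption: indices into the
     +			 * colored page pool set up elsewhere in this patch) */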
2999 +			switch (color) {
3000 +				case 0:
3001 +					node = 48;
3002 +					break;
3003 +				case 1:
3004 +					node = 49;
3005 +					break;
3006 +				case 2:
3007 +					node = 50;
3008 +					break;
3009 +				case 3:
3010 +					node = 51;
3011 +					break;
3012 +				case 4:
3013 +					node = 68;
3014 +					break;
3015 +				case 5:
3016 +					node = 69;
3017 +					break;
3018 +				case 6:
3019 +					node = 86;
3020 +					break;
3021 +				case 7:
3022 +					node = 87;
3023 +					break;
3024 +				case 8:
3025 +					node = 88;
3026 +					break;
3027 +				case 9:
3028 +					node = 105;
3029 +					break;
3030 +				case 10:
3031 +					node = 106;
3032 +					break;
3033 +				case 11:
3034 +					node = 107;
3035 +					break;
3036 +				case 12:
3037 +					node = 108;
3038 +					break;					
3039 +				case 13:
3040 +					node = 125;
3041 +					break;
3042 +				case 14:
3043 +					node = 126;
3044 +					break;
3045 +				case 15:
3046 +					node = 127;
3047 +					break;
3048 +			}	
3049 +			page = get_colored_page(node);
3050 +			if (!page) {
3051 +				printk(KERN_WARNING "no more colored pages\n");
3052 +				ret = -EINVAL;
3053 +				goto out_free;
3054 +			}
3055 +			flusher_pages[way][color] = page_address(page);
3056 +			if (!flusher_pages[way][color]) {
3057 +				printk(KERN_WARNING "bad page address\n");