Attachment 'MC2-litmus-rt-rtss16.patch'

   1 diff --git arch/arm/boot/compressed/Makefile arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- arch/arm/boot/compressed/Makefile
   4 +++ arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git arch/arm/include/asm/unistd.h arch/arm/include/asm/unistd.h
  14 index 7197bbe..5291b70 100644
  15 --- arch/arm/include/asm/unistd.h
  16 +++ arch/arm/include/asm/unistd.h
  17 @@ -19,7 +19,7 @@
  18   * This may need to be greater than __NR_last_syscall+1 in order to
  19   * account for the padding in the syscall table
  20   */
  21 -#define __NR_syscalls  (388 + NR_litmus_syscalls + 3)
  22 +#define __NR_syscalls  (388 + NR_litmus_syscalls + 0)
  23  
  24  
  25  /*
  26 diff --git arch/arm/kernel/calls.S arch/arm/kernel/calls.S
  27 index 3e00296..55dc863 100644
  28 --- arch/arm/kernel/calls.S
  29 +++ arch/arm/kernel/calls.S
  30 @@ -410,7 +410,13 @@
  31  		CALL(sys_release_ts)
  32  		CALL(sys_null_call)
  33  /* 400 */	CALL(sys_get_current_budget)
  34 -
  35 +		CALL(sys_reservation_create)
  36 +		CALL(sys_reservation_destroy)
  37 +		CALL(sys_set_mc2_task_param)
  38 +		CALL(sys_set_page_color)
  39 +/* 405 */	CALL(sys_test_call)
  40 +		CALL(sys_run_test)
  41 +		CALL(sys_lock_buffer)
  42  
  43  #ifndef syscalls_counted
  44  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
  45 diff --git arch/arm/mm/cache-l2x0.c arch/arm/mm/cache-l2x0.c
  46 index e309c8f..71c969a 100644
  47 --- arch/arm/mm/cache-l2x0.c
  48 +++ arch/arm/mm/cache-l2x0.c
  49 @@ -33,6 +33,8 @@
  50  #include "cache-tauros3.h"
  51  #include "cache-aurora-l2.h"
  52  
  53 +#include <litmus/cache_proc.h>
  54 +
  55  struct l2c_init_data {
  56  	const char *type;
  57  	unsigned way_size_0;
  58 @@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  59  
  60  	if (n) {
  61  		unsigned i;
  62 -
  63  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  64  		for (i = 0; i < n; i++)
  65  			pr_cont(" %s", errata[i]);
  66 @@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
  67  	},
  68  };
  69  
  70 +void l2c310_flush_all(void)
  71 +{
  72 +	l2c210_flush_all();
  73 +}
  74 +
  75  static int __init __l2c_init(const struct l2c_init_data *data,
  76  			     u32 aux_val, u32 aux_mask, u32 cache_id)
  77  {
  78 @@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
  79  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
  80  		data->type, cache_id, aux);
  81  
  82 +	litmus_setup_lockdown(l2x0_base, cache_id);
  83 +	
  84  	return 0;
  85  }
  86  
  87 diff --git arch/x86/syscalls/syscall_32.tbl arch/x86/syscalls/syscall_32.tbl
  88 index a0ec8ab..b303a9b 100644
  89 --- arch/x86/syscalls/syscall_32.tbl
  90 +++ arch/x86/syscalls/syscall_32.tbl
  91 @@ -378,3 +378,10 @@
  92  369	i386	release_ts		sys_release_ts
  93  370	i386	null_call		sys_null_call
  94  371	i386	get_current_budget	sys_get_current_budget
  95 +372	i386	reservation_create	sys_reservation_create
  96 +373	i386	reservation_destroy	sys_reservation_destroy
  97 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
  98 +375	i386	set_page_color		sys_set_page_color
  99 +376	i386	test_call		sys_test_call
 100 +377	i386	run_test		sys_run_test
 101 +378	i386	lock_buffer		sys_lock_buffer
 102 diff --git arch/x86/syscalls/syscall_64.tbl arch/x86/syscalls/syscall_64.tbl
 103 index 04f5b74..5f24a80 100644
 104 --- arch/x86/syscalls/syscall_64.tbl
 105 +++ arch/x86/syscalls/syscall_64.tbl
 106 @@ -343,7 +343,13 @@
 107  361	common	release_ts		sys_release_ts
 108  362	common	null_call		sys_null_call
 109  363	common	get_current_budget	sys_get_current_budget
 110 -
 111 +364	common	reservation_create	sys_reservation_create
 112 +365	common	reservation_destroy	sys_reservation_destroy
 113 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 114 +367	common	set_page_color		sys_set_page_color
 115 +368	common	test_call		sys_test_call
 116 +369	common	run_test		sys_run_test
 117 +370	common	lock_buffer		sys_lock_buffer
 118  
 119  #
 120  # x32-specific system call numbers start at 512 to avoid cache impact
 121 diff --git include/litmus/cache_proc.h include/litmus/cache_proc.h
 122 new file mode 100644
 123 index 0000000..e9440de
 124 --- /dev/null
 125 +++ include/litmus/cache_proc.h
 126 @@ -0,0 +1,17 @@
 127 +#ifndef LITMUS_CACHE_PROC_H
 128 +#define LITMUS_CACHE_PROC_H
 129 +
 130 +#ifdef __KERNEL__
 131 +
 132 +void litmus_setup_lockdown(void __iomem*, u32);
 133 +void enter_irq_mode(void);
 134 +void exit_irq_mode(void);
 135 +void flush_cache(int all);
 136 +void lock_cache(int cpu, u32 val);
 137 +
 138 +extern struct page *new_alloc_page_color(unsigned long color);
 139 +
 140 +#endif
 141 +
 142 +#endif
 143 +
 144 diff --git include/litmus/litmus.h include/litmus/litmus.h
 145 index a6eb534..441210c 100644
 146 --- include/litmus/litmus.h
 147 +++ include/litmus/litmus.h
 148 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 149  	((current)->state == TASK_RUNNING || 	\
 150  	 preempt_count() & PREEMPT_ACTIVE)
 151  
 152 +#define is_running(t) 			\
 153 +	((t)->state == TASK_RUNNING || 	\
 154 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 155 +
 156 +#define is_blocked(t)       \
 157 +	(!is_running(t))
 158 +
 159  #define is_released(t, now)	\
 160  	(lt_before_eq(get_release(t), now))
 161  #define is_tardy(t, now)    \
 162 diff --git include/litmus/mc2_common.h include/litmus/mc2_common.h
 163 new file mode 100644
 164 index 0000000..e3c0af2
 165 --- /dev/null
 166 +++ include/litmus/mc2_common.h
 167 @@ -0,0 +1,31 @@
 168 +/*
 169 + * MC^2 common data structures
 170 + */
 171 + 
 172 +#ifndef __UNC_MC2_COMMON_H__
 173 +#define __UNC_MC2_COMMON_H__
 174 +
 175 +enum crit_level {
 176 +	CRIT_LEVEL_A = 0,
 177 +	CRIT_LEVEL_B = 1,
 178 +	CRIT_LEVEL_C = 2,
 179 +	NUM_CRIT_LEVELS = 3,
 180 +};
 181 +
 182 +struct mc2_task {
 183 +	enum crit_level crit;
 184 +	unsigned int res_id;
 185 +};
 186 +
 187 +#ifdef __KERNEL__
 188 +
 189 +#include <litmus/reservation.h>
 190 +
 191 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
 192 +
 193 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 194 +							struct reservation *res);
 195 +	
 196 +#endif /* __KERNEL__ */
 197 +
 198 +#endif
 199 \ No newline at end of file
 200 diff --git include/litmus/polling_reservations.h include/litmus/polling_reservations.h
 201 new file mode 100644
 202 index 0000000..66c9b1e
 203 --- /dev/null
 204 +++ include/litmus/polling_reservations.h
 205 @@ -0,0 +1,36 @@
 206 +#ifndef LITMUS_POLLING_RESERVATIONS_H
 207 +#define LITMUS_POLLING_RESERVATIONS_H
 208 +
 209 +#include <litmus/reservation.h>
 210 +
 211 +struct polling_reservation {
 212 +	/* extend basic reservation */
 213 +	struct reservation res;
 214 +
 215 +	lt_t max_budget;
 216 +	lt_t period;
 217 +	lt_t deadline;
 218 +	lt_t offset;
 219 +};
 220 +
 221 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
 222 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
 223 +
 224 +struct table_driven_reservation {
 225 +	/* extend basic reservation */
 226 +	struct reservation res;
 227 +
 228 +	lt_t major_cycle;
 229 +	unsigned int next_interval;
 230 +	unsigned int num_intervals;
 231 +	struct lt_interval *intervals;
 232 +
 233 +	/* info about current scheduling slot */
 234 +	struct lt_interval cur_interval;
 235 +	lt_t major_cycle_start;
 236 +};
 237 +
 238 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
 239 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
 240 +
 241 +#endif
 242 diff --git include/litmus/reservation.h include/litmus/reservation.h
 243 new file mode 100644
 244 index 0000000..7e022b3
 245 --- /dev/null
 246 +++ include/litmus/reservation.h
 247 @@ -0,0 +1,256 @@
 248 +#ifndef LITMUS_RESERVATION_H
 249 +#define LITMUS_RESERVATION_H
 250 +
 251 +#include <linux/list.h>
 252 +#include <linux/hrtimer.h>
 253 +
 254 +struct reservation_client;
 255 +struct reservation_environment;
 256 +struct reservation;
 257 +
 258 +typedef enum {
 259 +	/* reservation has no clients, is not consuming budget */
 260 +	RESERVATION_INACTIVE = 0,
 261 +
 262 +	/* reservation has clients, consumes budget when scheduled */
 263 +	RESERVATION_ACTIVE,
 264 +
 265 +	/* reservation has no clients, but may be consuming budget */
 266 +	RESERVATION_ACTIVE_IDLE,
 267 +
 268 +	/* Reservation has no budget and waits for
 269 +	 * replenishment. May or may not have clients. */
 270 +	RESERVATION_DEPLETED,
 271 +} reservation_state_t;
 272 +
 273 +
 274 +/* ************************************************************************** */
 275 +
 276 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
 277 + * to schedule right now and background work can be scheduled. */
 278 +typedef struct task_struct * (*dispatch_t)  (
 279 +	struct reservation_client *client
 280 +);
 281 +
 282 +/* Something that can be managed in a reservation and that can yield
 283 + * a process for dispatching. Contains a pointer to the reservation
 284 + * to which it "belongs". */
 285 +struct reservation_client {
 286 +	struct list_head list;
 287 +	struct reservation* reservation;
 288 +	dispatch_t dispatch;
 289 +};
 290 +
 291 +
 292 +/* ************************************************************************** */
 293 +
 294 +/* Called by reservations to request state change. */
 295 +typedef void (*reservation_change_state_t)  (
 296 +	struct reservation_environment* env,
 297 +	struct reservation *res,
 298 +	reservation_state_t new_state
 299 +);
 300 +
 301 +/* The framework within which reservations operate. */
 302 +struct reservation_environment {
 303 +	lt_t time_zero;
 304 +	lt_t current_time;
 305 +
 306 +	/* services invoked by reservations */
 307 +	reservation_change_state_t change_state;
 308 +};
 309 +
 310 +
 311 +/* ************************************************************************** */
 312 +
 313 +/* A new client is added or an existing client resumes. */
 314 +typedef void (*client_arrives_t)  (
 315 +	struct reservation *reservation,
 316 +	struct reservation_client *client
 317 +);
 318 +
 319 +/* A client suspends or terminates. */
 320 +typedef void (*client_departs_t)  (
 321 +	struct reservation *reservation,
 322 +	struct reservation_client *client,
 323 +	int did_signal_job_completion
 324 +);
 325 +
 326 +/* A previously requested replenishment has occurred. */
 327 +typedef void (*on_replenishment_timer_t)  (
 328 +	struct reservation *reservation
 329 +);
 330 +
 331 +/* Update the reservation's budget to reflect execution or idling. */
 332 +typedef void (*drain_budget_t) (
 333 +	struct reservation *reservation,
 334 +	lt_t how_much
 335 +);
 336 +
 337 +/* Select a ready task from one of the clients for scheduling. */
 338 +typedef struct task_struct* (*dispatch_client_t)  (
 339 +	struct reservation *reservation,
 340 +	lt_t *time_slice /* May be used to force rescheduling after
 341 +	                    some amount of time. 0 => no limit */
 342 +);
 343 +
 344 +
 345 +struct reservation_ops {
 346 +	dispatch_client_t dispatch_client;
 347 +
 348 +	client_arrives_t client_arrives;
 349 +	client_departs_t client_departs;
 350 +
 351 +	on_replenishment_timer_t replenish;
 352 +	drain_budget_t drain_budget;
 353 +};
 354 +
 355 +struct reservation {
 356 +	/* used to queue in environment */
 357 +	struct list_head list;
 358 +
 359 +	reservation_state_t state;
 360 +	unsigned int id;
 361 +
 362 +	/* exact meaning defined by impl. */
 363 +	lt_t priority;
 364 +	lt_t cur_budget;
 365 +	lt_t next_replenishment;
 366 +
 367 +	/* budget stats */
 368 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
 369 +	lt_t budget_consumed_total;
 370 +
 371 +	/* interaction with framework */
 372 +	struct reservation_environment *env;
 373 +	struct reservation_ops *ops;
 374 +
 375 +	struct list_head clients;
 376 +	
 377 +	/* for global env. */
 378 +	int scheduled_on;
 379 +	int event_added;
 380 +	/* set if blocked by a ghost job; do not charge budget while ACTIVE */
 381 +	int blocked_by_ghost;
 382 +	/* ghost-job flag; if clear, do not charge budget while ACTIVE_IDLE */
 383 +	int is_ghost;
 384 +};
 385 +
 386 +void reservation_init(struct reservation *res);
 387 +
 388 +/* Default implementations */
 389 +
 390 +/* simply select the first client in the list, set *for_at_most to zero */
 391 +struct task_struct* default_dispatch_client(
 392 +	struct reservation *res,
 393 +	lt_t *for_at_most
 394 +);
 395 +
 396 +/* "connector" reservation client to hook up tasks with reservations */
 397 +struct task_client {
 398 +	struct reservation_client client;
 399 +	struct task_struct *task;
 400 +};
 401 +
 402 +void task_client_init(struct task_client *tc, struct task_struct *task,
 403 +	struct reservation *reservation);
 404 +
 405 +#define SUP_RESCHEDULE_NOW (0)
 406 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
 407 +
 408 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
 409 + * environment.
 410 + */
 411 +struct sup_reservation_environment {
 412 +	struct reservation_environment env;
 413 +
 414 +	/* ordered by priority */
 415 +	struct list_head active_reservations;
 416 +
 417 +	/* ordered by next_replenishment */
 418 +	struct list_head depleted_reservations;
 419 +
 420 +	/* unordered */
 421 +	struct list_head inactive_reservations;
 422 +
 423 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
 424 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
 425 +	 * any other value means program a timer for the given time
 426 +	 */
 427 +	lt_t next_scheduler_update;
 428 +	/* set to true if a call to sup_dispatch() is imminent */
 429 +	bool will_schedule;
 430 +};
 431 +
 432 +/* Contract:
 433 + *  - before calling into sup_ code, or any reservation methods,
 434 + *    update the time with sup_update_time(); and
 435 + *  - after calling into sup_ code, or any reservation methods,
 436 + *    check next_scheduler_update and program timer or trigger
 437 + *    scheduler invocation accordingly.
 438 + */
 439 +
 440 +void sup_init(struct sup_reservation_environment* sup_env);
 441 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
 442 +	struct reservation* new_res);
 443 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
 444 +	lt_t timeout);
 445 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
 446 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 447 +
 448 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 449 +	unsigned int id);
 450 +	
 451 +/* A global multiprocessor reservation environment. */
 452 +
 453 +typedef enum {
 454 +	EVENT_REPLENISH = 0,
 455 +	EVENT_DRAIN,
 456 +	EVENT_OTHERS,
 457 +} event_type_t;
 458 +
 459 +
 460 +struct next_timer_event {
 461 +	lt_t next_update;
 462 +	int timer_armed_on;
 463 +	unsigned int id;
 464 +	event_type_t type;
 465 +	struct list_head list;
 466 +};
 467 +
 468 +struct gmp_reservation_environment {
 469 +	raw_spinlock_t lock;
 470 +	struct reservation_environment env;
 471 +
 472 +	/* ordered by priority */
 473 +	struct list_head active_reservations;
 474 +
 475 +	/* ordered by next_replenishment */
 476 +	struct list_head depleted_reservations;
 477 +
 478 +	/* unordered */
 479 +	struct list_head inactive_reservations;
 480 +
 481 +	/* timer event ordered by next_update */
 482 +	struct list_head next_events;
 483 +	
 484 +	/* (schedule_now == true) means call gmp_dispatch() now */
 485 +	int schedule_now;
 486 +	/* set to true if a call to gmp_dispatch() is imminent */
 487 +	bool will_schedule;
 488 +};
 489 +
 490 +void gmp_init(struct gmp_reservation_environment* gmp_env);
 491 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
 492 +	struct reservation* new_res);
 493 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
 494 +	lt_t timeout, unsigned int id, event_type_t type);
 495 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
 496 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
 497 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
 498 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
 499 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
 500 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
 501 +	unsigned int id);
 502 +
 503 +#endif
 504 diff --git include/litmus/rt_param.h include/litmus/rt_param.h
 505 index 7b9a909..56de045 100644
 506 --- include/litmus/rt_param.h
 507 +++ include/litmus/rt_param.h
 508 @@ -51,6 +51,16 @@ typedef enum {
 509  	TASK_EARLY
 510  } release_policy_t;
 511  
 512 +#ifdef CONFIG_PGMRT_SUPPORT
 513 +typedef enum {
 514 +    PGM_NOT_A_NODE,
 515 +    PGM_SRC,
 516 +    PGM_SINK,
 517 +    PGM_SRC_SINK,
 518 +    PGM_INTERNAL
 519 +} pgm_node_type_t;
 520 +#endif
 521 +
 522  /* We use the common priority interpretation "lower index == higher priority",
 523   * which is commonly used in fixed-priority schedulability analysis papers.
 524   * So, a numerically lower priority value implies higher scheduling priority,
 525 @@ -62,6 +72,7 @@ typedef enum {
 526  #define LITMUS_MAX_PRIORITY     512
 527  #define LITMUS_HIGHEST_PRIORITY   1
 528  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
 529 +#define LITMUS_NO_PRIORITY		UINT_MAX
 530  
 531  /* Provide generic comparison macros for userspace,
 532   * in case that we change this later. */
 533 @@ -71,6 +82,46 @@ typedef enum {
 534  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
 535  	 (p) <= LITMUS_LOWEST_PRIORITY)
 536  
 537 +/* reservation support */
 538 +
 539 +typedef enum {
 540 +	PERIODIC_POLLING,
 541 +	SPORADIC_POLLING,
 542 +	TABLE_DRIVEN,
 543 +} reservation_type_t;
 544 +
 545 +struct lt_interval {
 546 +	lt_t start;
 547 +	lt_t end;
 548 +};
 549 +
 550 +#ifndef __KERNEL__
 551 +#define __user
 552 +#endif
 553 +
 554 +struct reservation_config {
 555 +	unsigned int id;
 556 +	lt_t priority;
 557 +	int  cpu;
 558 +
 559 +	union {
 560 +		struct {
 561 +			lt_t period;
 562 +			lt_t budget;
 563 +			lt_t relative_deadline;
 564 +			lt_t offset;
 565 +		} polling_params;
 566 +
 567 +		struct {
 568 +			lt_t major_cycle_length;
 569 +			unsigned int num_intervals;
 570 +			struct lt_interval __user *intervals;
 571 +		} table_driven_params;
 572 +	};
 573 +};
 574 +
 575 +/* regular sporadic task support */
 576 +
 577  struct rt_task {
 578  	lt_t 		exec_cost;
 579  	lt_t 		period;
 580 @@ -81,6 +132,10 @@ struct rt_task {
 581  	task_class_t	cls;
 582  	budget_policy_t  budget_policy;  /* ignored by pfair */
 583  	release_policy_t release_policy;
 584 +#ifdef CONFIG_PGMRT_SUPPORT
 585 +	pgm_node_type_t	pgm_type;
 586 +	lt_t			pgm_expected_etoe;
 587 +#endif
 588  };
 589  
 590  union np_flag {
 591 @@ -121,6 +176,13 @@ struct control_page {
 592  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
 593  				     * started. */
 594  
 595 +#ifdef CONFIG_PGMRT_SUPPORT
 596 +    /* Flags from userspace signifying PGM wait states. */
 597 +    volatile uint32_t   pgm_waiting;    /* waiting for tokens */
 598 +    volatile uint32_t   pgm_sending;    /* sending tokens */
 599 +    volatile uint32_t   pgm_satisfied;  /* done waiting/sending */
 600 +#endif
 601 +
 602  	/* to be extended */
 603  };
 604  
 605 @@ -165,6 +227,7 @@ struct rt_job {
 606  };
 607  
 608  struct pfair_param;
 609 +struct mc2_task;
 610  
 611  /*	RT task parameters for scheduling extensions
 612   *	These parameters are inherited during clone and therefore must
 613 @@ -246,7 +309,10 @@ struct rt_param {
 614  	volatile int		linked_on;
 615  
 616  	/* PFAIR/PD^2 state. Allocated on demand. */
 617 -	struct pfair_param*	pfair;
 618 +	union {
 619 +		void *plugin_state;
 620 +		struct pfair_param *pfair;
 621 +	};
 622  
 623  	/* Fields saved before BE->RT transition.
 624  	 */
 625 @@ -275,6 +341,10 @@ struct rt_param {
 626  
 627  	/* Pointer to the page shared between userspace and kernel. */
 628  	struct control_page * ctrl_page;
 629 +
 630 +	/* Mixed-criticality specific data */
 631 +	struct mc2_task* mc2_data;
 632 +	unsigned long addr_ctrl_page;
 633  };
 634  
 635  #endif
 636 diff --git include/litmus/sched_plugin.h include/litmus/sched_plugin.h
 637 index f36bb38..4c8aaa6 100644
 638 --- include/litmus/sched_plugin.h
 639 +++ include/litmus/sched_plugin.h
 640 @@ -83,6 +83,10 @@ typedef void (*synchronous_release_at_t)(lt_t time_zero);
 641   * reservation-specific values. */
 642  typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
 643  
 644 +/* Reservation creation/removal backends. The meanings of reservation_type and
 645 + * reservation_id are entirely plugin-specific. */
 646 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
 647 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
 648  
 649  /************************ misc routines ***********************/
 650  
 651 @@ -118,6 +122,10 @@ struct sched_plugin {
 652  
 653  	current_budget_t	current_budget;
 654  
 655 +	/* Reservation support */
 656 +	reservation_create_t	reservation_create;
 657 +	reservation_destroy_t	reservation_destroy;
 658 +
 659  #ifdef CONFIG_LITMUS_LOCKING
 660  	/*	locking protocols	*/
 661  	allocate_lock_t		allocate_lock;
 662 diff --git include/litmus/trace.h include/litmus/trace.h
 663 index 6017872..24ca412 100644
 664 --- include/litmus/trace.h
 665 +++ include/litmus/trace.h
 666 @@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 667  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
 668  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
 669  
 670 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
 671 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
 672 +
 673  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
 674  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
 675  
 676 @@ -137,6 +140,17 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 677  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
 678  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
 679  
 680 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
 681 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
 682 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
 683 +
 684 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
 685 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
 686 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
 687 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
 688 +
 689 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
 690 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
 691 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
 692 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
 693  
 694  #endif /* !_SYS_TRACE_H_ */
 695 diff --git include/litmus/unistd_32.h include/litmus/unistd_32.h
 696 index 570b1f5..86bbbb8d 100644
 697 --- include/litmus/unistd_32.h
 698 +++ include/litmus/unistd_32.h
 699 @@ -18,5 +18,12 @@
 700  #define __NR_release_ts		__LSC(10)
 701  #define __NR_null_call		__LSC(11)
 702  #define __NR_get_current_budget __LSC(12)
 703 +#define __NR_reservation_create	__LSC(13)
 704 +#define __NR_reservation_destroy	__LSC(14)
 705 +#define __NR_set_mc2_task_param	__LSC(15)
 706 +#define __NR_set_page_color		__LSC(16)
 707 +#define __NR_test_call		__LSC(17)
 708 +#define __NR_run_test		__LSC(18)
 709 +#define __NR_lock_buffer	__LSC(19)
 710  
 711 -#define NR_litmus_syscalls	13
 712 +#define NR_litmus_syscalls	20
 713 diff --git include/litmus/unistd_64.h include/litmus/unistd_64.h
 714 index 3096bf2..4b96e7c 100644
 715 --- include/litmus/unistd_64.h
 716 +++ include/litmus/unistd_64.h
 717 @@ -30,6 +30,21 @@ __SYSCALL(__NR_release_ts, sys_release_ts)
 718  #define __NR_null_call				__LSC(11)
 719  __SYSCALL(__NR_null_call, sys_null_call)
 720  #define __NR_get_current_budget			__LSC(12)
 721 -__SYSCALL(____NR_get_current_budget, sys_get_current_budget)
 722 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
 723 +#define __NR_reservation_create		__LSC(13)
 724 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
 725 +#define __NR_reservation_destroy	__LSC(14)
 726 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
 727 +#define __NR_set_mc2_task_param		__LSC(15)
 728 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
 729 +#define __NR_set_page_color			__LSC(16)
 730 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
 731 +#define __NR_test_call				__LSC(17)
 732 +__SYSCALL(__NR_test_call, sys_test_call)
 733 +#define __NR_run_test				__LSC(18)
 734 +__SYSCALL(__NR_run_test, sys_run_test)
 735 +#define __NR_lock_buffer			__LSC(19)
 736 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
 737  
 738 -#define NR_litmus_syscalls 13
 739 +
 740 +#define NR_litmus_syscalls 20
 741 diff --git litmus/Makefile litmus/Makefile
 742 index 7970cd5..e274409 100644
 743 --- litmus/Makefile
 744 +++ litmus/Makefile
 745 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
 746  	    sync.o \
 747  	    rt_domain.o \
 748  	    edf_common.o \
 749 +	    mc2_common.o \
 750  	    fp_common.o \
 751  	    fdso.o \
 752  	    locking.o \
 753 @@ -19,13 +20,18 @@ obj-y     = sched_plugin.o litmus.o \
 754  	    binheap.o \
 755  	    ctrldev.o \
 756  	    uncachedev.o \
 757 +	    reservation.o \
 758 +	    polling_reservations.o \
 759  	    sched_gsn_edf.o \
 760  	    sched_psn_edf.o \
 761 -	    sched_pfp.o
 762 +	    sched_pfp.o \
 763 +	    sched_mc2.o \
 764 +	    bank_proc.o \
 765 +	    color_shm.o \
 766 +	    cache_proc.o
 767  
 768  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 769  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 770 -
 771  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 772  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 773  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
 774 diff --git litmus/bank_proc.c litmus/bank_proc.c
 775 new file mode 100644
 776 index 0000000..6103611
 777 --- /dev/null
 778 +++ litmus/bank_proc.c
 779 @@ -0,0 +1,737 @@
 780 +/*
 781 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
 782 + *                This file keeps a pool of colored pages. Users can request pages with
 783 + *                a specific color or bank number.
 784 + *                Part of the code is adapted from Jonathan Herman's code.
 785 + */
 786 +#include <linux/init.h>
 787 +#include <linux/types.h>
 788 +#include <linux/kernel.h>
 789 +#include <linux/module.h>
 790 +#include <linux/sysctl.h>
 791 +#include <linux/slab.h>
 792 +#include <linux/io.h>
 793 +#include <linux/mutex.h>
 794 +#include <linux/mm.h>
 795 +#include <linux/random.h>
 796 +
 797 +#include <litmus/litmus_proc.h>
 798 +#include <litmus/sched_trace.h>
 799 +#include <litmus/litmus.h>
 800 +
 801 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
 802 +
 803 +// This address decoding is used on the imx6-sabredsd platform
 804 +#define BANK_MASK  0x38000000     
 805 +#define BANK_SHIFT  27
 806 +#define CACHE_MASK  0x0000f000      
 807 +#define CACHE_SHIFT 12
 808 +
 809 +#define PAGES_PER_COLOR 1024
 810 +unsigned int NUM_PAGE_LIST;  //8*16
 811 +
 812 +unsigned int number_banks;
 813 +unsigned int number_cachecolors;
 814 +
 815 +unsigned int set_partition_max = 0x0000ffff;
 816 +unsigned int set_partition_min = 0;
 817 +unsigned int bank_partition_max = 0x000000ff;
 818 +unsigned int bank_partition_min = 0;
 819 +
 820 +int show_page_pool = 0;
 821 +int refill_page_pool = 0;
 822 +spinlock_t reclaim_lock;
 823 +
 824 +unsigned int set_partition[9] = {
 825 +        0x00000003,  /* Core 0, and Level A*/
 826 +        0x00000003,  /* Core 0, and Level B*/
 827 +        0x0000000C,  /* Core 1, and Level A*/
 828 +        0x0000000C,  /* Core 1, and Level B*/
 829 +        0x00000030,  /* Core 2, and Level A*/
 830 +        0x00000030,  /* Core 2, and Level B*/
 831 +        0x000000C0,  /* Core 3, and Level A*/
 832 +        0x000000C0,  /* Core 3, and Level B*/
 833 +        0x0000ff00,  /* Level C */
 834 +};
 835 +
 836 +unsigned int bank_partition[9] = {
 837 +        0x00000010,  /* Core 0, and Level A*/
 838 +        0x00000010,  /* Core 0, and Level B*/
 839 +        0x00000020,  /* Core 1, and Level A*/
 840 +        0x00000020,  /* Core 1, and Level B*/
 841 +        0x00000040,  /* Core 2, and Level A*/
 842 +        0x00000040,  /* Core 2, and Level B*/
 843 +        0x00000080,  /* Core 3, and Level A*/
 844 +        0x00000080,  /* Core 3, and Level B*/
 845 +        0x0000000c,  /* Level C */
 846 +};
 847 +
 848 +unsigned int set_index[9] = {
 849 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 850 +};
 851 +
 852 +unsigned int bank_index[9] = {
 853 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 854 +};
 855 +
 856 +struct mutex void_lockdown_proc;
 857 +
 858 +
 859 +/*
 860 + * Every page list contains a lock, a list, and a counter recording how many pages it stores
 861 + */ 
 862 +struct color_group {
 863 +	spinlock_t lock;
 864 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
 865 +	struct list_head list;
 866 +	atomic_t nr_pages;
 867 +};
 868 +
 869 +
 870 +static struct color_group *color_groups;
 871 +
 872 +/*
 873 + * Naive function to count the number of 1's
 874 + */
 875 +unsigned int counting_one_set(unsigned int v)
 876 +{
 877 +//    unsigned int v; // count the number of bits set in v
 878 +    unsigned int c; // c accumulates the total bits set in v
 879 +
 880 +    for (c = 0; v; v >>= 1)
 881 +    {
 882 +        c += v & 1;
 883 +    }
 884 +    return c;
 885 +}
 886 +
 887 +unsigned int two_exp(unsigned int e)
 888 +{
 889 +    unsigned int v = 1;
 890 +    for (; e>0; e-- )
 891 +    {
 892 +        v=v*2;
 893 +    }
 894 +    return v;
 895 +}
 896 +
 897 +unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
 898 +{
 899 +    unsigned int pos = 0;
 900 +
 901 +    while(true)
 902 +    {
 903 +        if(index ==0 && (bitmask & 1)==1)
 904 +        {
 905 +            break;
 906 +        }
 907 +        if(index !=0 && (bitmask & 1)==1){
 908 +            index--;
 909 +        }
 910 +        pos++;
 911 +        bitmask = bitmask >>1;
 912 +
 913 +    }
 914 +    return pos;
 915 +}
 916 +
 917 +
 918 +
 919 +/* Decoding page color, 0~15 */ 
 920 +static inline unsigned int page_color(struct page *page)
 921 +{
 922 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
 923 +}
 924 +
 925 +/* Decoding page bank number, 0~7 */ 
 926 +static inline unsigned int page_bank(struct page *page)
 927 +{
 928 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
 929 +}
 930 +
 931 +static inline unsigned int page_list_index(struct page *page)
 932 +{
 933 +    unsigned int idx;  
 934 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
 935 +//    printk("address = %lx, ", page_to_phys(page));
 936 +//    printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx);
 937 +
 938 +    return idx; 
 939 +}
 940 +
 941 +
 942 +
 943 +/*
 944 + * Determine the smallest number of pages among the page lists.
 945 + */
 946 +static unsigned long smallest_nr_pages(void)
 947 +{
 948 +	unsigned long i, min_pages;
 949 +	struct color_group *cgroup;
 950 +	cgroup = &color_groups[16*2];
 951 +	min_pages =atomic_read(&cgroup->nr_pages); 
 952 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
 953 +		cgroup = &color_groups[i];
 954 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
 955 +			min_pages = atomic_read(&cgroup->nr_pages);
 956 +	}
 957 +	return min_pages;
 958 +}
 959 +
 960 +static void show_nr_pages(void)
 961 +{
 962 +	unsigned long i;
 963 +	struct color_group *cgroup;
 964 +	printk("show nr pages***************************************\n");
 965 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
 966 +		cgroup = &color_groups[i];
 967 +		printk("(%03d) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
 968 +		if((i % 8) ==7){
 969 +		    printk("\n");
 970 +                }
 971 +	}
 972 +}
 973 +
 974 +/*
 975 + * Add a page to current pool.
 976 + */
 977 +void add_page_to_color_list(struct page *page)
 978 +{
 979 +	const unsigned long color = page_list_index(page);
 980 +	struct color_group *cgroup = &color_groups[color];
 981 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
 982 +	BUG_ON(page_count(page) > 1);
 983 +	spin_lock(&cgroup->lock);
 984 +	list_add_tail(&page->lru, &cgroup->list);
 985 +	atomic_inc(&cgroup->nr_pages);
 986 +	SetPageLRU(page);
 987 +	spin_unlock(&cgroup->lock);
 988 +}
 989 +
 990 +/*
 991 + * Replenish the page pool. 
 992 + * If a newly allocated page has a color we want, it is pushed onto the corresponding page list;
 993 + * otherwise, it is freed.
 994 + */
 995 +static int do_add_pages(void)
 996 +{
 997 +	//printk("LITMUS do add pages\n");
 998 +	
 999 +	struct page *page, *page_tmp;
1000 +	LIST_HEAD(free_later);
1001 +	unsigned long color;
1002 +	int ret = 0;
1003 +	int i = 0;
1004 +	int free_counter = 0;
1005 +	unsigned long counter[128]= {0}; 
1006 +        
1007 +        //printk("Before refill : \n");
1008 +        //show_nr_pages();
1009 +
1010 +	// until all the page lists contain enough pages 
1011 +	//for (i =0; i<5; i++) {
1012 +	for (i=0; i< 1024*100;i++) {
1013 +	//while (smallest_nr_pages() < PAGES_PER_COLOR) {
1014 +       //         printk("smallest = %d\n", smallest_nr_pages());	
1015 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
1016 +	    //    page = alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
1017 +	
1018 +		if (unlikely(!page)) {
1019 +			printk(KERN_WARNING "Could not allocate pages.\n");
1020 +			ret = -ENOMEM;
1021 +			goto out;
1022 +		}
1023 +		color = page_list_index(page);
1024 +		counter[color]++;
1025 +	//	printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1026 +                //show_nr_pages();
1027 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
1028 +		//if ( PAGES_PER_COLOR && color>=16*2) {
1029 +			add_page_to_color_list(page);
1030 +	//		printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
1031 +		} else{
1032 +			// Pages here will be freed later 
1033 +			list_add_tail(&page->lru, &free_later);
1034 +			free_counter++;
1035 +		        //list_del(&page->lru);
1036 +		//        __free_page(page);
1037 +	//		printk("useless page(%d) = color %x, bank %x\n", color,  page_color(page), page_bank(page));
1038 +		}
1039 +               //show_nr_pages();
1040 +                /*
1041 +                if(free_counter >= PAGES_PER_COLOR)
1042 +                {
1043 +                    printk("free unwanted page list earlier");
1044 +                    free_counter = 0;
1045 +	            list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1046 +		        list_del(&page->lru);
1047 +		        __free_page(page);
1048 +	            }
1049 +
1050 +                    show_nr_pages();
1051 +                }
1052 +                */
1053 +        }
1054 +/*        printk("page counter = \n");
1055 +        for (i=0; i<128; i++)
1056 +        {
1057 +            printk("(%03d) = %4d, ", i , counter[i]);
1058 +            if(i%8 == 7){
1059 +                printk("\n");
1060 +            }
1061 +
1062 +        }
1063 +*/	
1064 +        //printk("After refill : \n");
1065 +        //show_nr_pages();
1066 +#if 1
1067 +	// Free the unwanted pages
1068 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1069 +		list_del(&page->lru);
1070 +		__free_page(page);
1071 +	}
1072 +#endif
1073 +out:
1074 +        return ret;
1075 +}
1076 +
1077 +/*
1078 + * Provide pages for replacement according to cache color.
1079 + * This should be the only allocation implementation here;
1080 + * other code should not call this function directly.
1081 + *
1082 + */ 
1083 +static struct  page *new_alloc_page_color( unsigned long color)
1084 +{
1085 +//	printk("allocate new page color = %d\n", color);	
1086 +	struct color_group *cgroup;
1087 +	struct page *rPage = NULL;
1088 +		
1089 +	if (color > (number_cachecolors*number_banks - 1)) { /* color is unsigned, so no < 0 check is needed */
1090 +		TRACE_CUR("Wrong color %lu\n", color);	
1091 +//		printk(KERN_WARNING "Wrong color %lu\n", color);
1092 +		goto out;
1093 +	}
1094 +
1095 +		
1096 +	cgroup = &color_groups[color];
1097 +	spin_lock(&cgroup->lock);
1098 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
1099 +		TRACE_CUR("No free %lu colored pages.\n", color);
1100 +//		printk(KERN_WARNING "no free %lu colored pages.\n", color);
1101 +		goto out_unlock;
1102 +	}
1103 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
1104 +	BUG_ON(page_count(rPage) > 1);
1105 +	//get_page(rPage);
1106 +	list_del(&rPage->lru);
1107 +	atomic_dec(&cgroup->nr_pages);
1108 +	ClearPageLRU(rPage);
1109 +out_unlock:
1110 +	spin_unlock(&cgroup->lock);
1111 +out:
1112 +	if( smallest_nr_pages() == 0)
1113 +        {
1114 +		do_add_pages();
1115 +       //     printk("ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");        
1116 +        
1117 +        }
1118 +	return rPage;
1119 +}
1120 +
1121 +struct page* get_colored_page(unsigned long color)
1122 +{
1123 +	return new_alloc_page_color(color);
1124 +}
1125 +
1126 +/*
1127 + * Provide pages for replacement according to the node parameter:
1128 + * node = 0 for Level A tasks in Cpu 0
1129 + * node = 1 for Level B tasks in Cpu 0
1130 + * node = 2 for Level A tasks in Cpu 1
1131 + * node = 3 for Level B tasks in Cpu 1
1132 + * node = 4 for Level A tasks in Cpu 2
1133 + * node = 5 for Level B tasks in Cpu 2
1134 + * node = 6 for Level A tasks in Cpu 3
1135 + * node = 7 for Level B tasks in Cpu 3
1136 + * node = 8 for Level C tasks 
1137 + */
1138 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
1139 +{
1140 +//	printk("allocate new page node = %d\n", node);	
1141 +//	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1142 +	struct color_group *cgroup;
1143 +	struct page *rPage = NULL;
1144 +	unsigned int color;
1145 +	
1146 +
1147 +        unsigned int idx = 0;
1148 +        idx += num_by_bitmask_index(set_partition[node], set_index[node]);
1149 +        idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
1150 +	//printk("node  = %d, idx = %d\n", node, idx);
1151 +
1152 +	rPage =  new_alloc_page_color(idx);
1153 +        
1154 +            
1155 +        set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
1156 +        bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
1157 +	return rPage; 
1158 +}
1159 +
1160 +
1161 +/*
1162 + * Reclaim pages.
1163 + */
1164 +void reclaim_page(struct page *page)
1165 +{
1166 +	const unsigned long color = page_list_index(page);
1167 +	unsigned long nr_reclaimed = 0;
1168 +	spin_lock(&reclaim_lock);
1169 +    	put_page(page);
1170 +	add_page_to_color_list(page);
1171 +
1172 +	spin_unlock(&reclaim_lock);
1173 +	printk("Reclaimed page(%lu) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1174 +}
1175 +
1176 +
1177 +/*
1178 + * Initialize the numbers of banks and cache colors 
1179 + */ 
1180 +static int __init init_variables(void)
1181 +{
1182 +	number_banks = counting_one_set(BANK_MASK); 
1183 +	number_banks = two_exp(number_banks); 
1184 +
1185 +	number_cachecolors = counting_one_set(CACHE_MASK);
1186 +	number_cachecolors = two_exp(number_cachecolors);
1187 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
1188 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
1189 +	mutex_init(&void_lockdown_proc);
1190 +	spin_lock_init(&reclaim_lock);
1191 +	return 0;
1192 +}
1193 +
1194 +
1195 +/*
1196 + * Initialize the page pool 
1197 + */
1198 +static int __init init_color_groups(void)
1199 +{
1200 +	struct color_group *cgroup;
1201 +	unsigned long i;
1202 +	int err = 0;
1203 +
1204 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
1205 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
1206 +
1207 +	if (!color_groups) {
1208 +		printk(KERN_WARNING "Could not allocate color groups.\n");
1209 +		err = -ENOMEM;
1210 +	}else{
1211 +
1212 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
1213 +			cgroup = &color_groups[i];
1214 +			atomic_set(&cgroup->nr_pages, 0);
1215 +			INIT_LIST_HEAD(&cgroup->list);
1216 +			spin_lock_init(&cgroup->lock);
1217 +		}
1218 +	}
1219 +        return err;
1220 +}
1221 +
1222 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1223 +		size_t *lenp, loff_t *ppos)
1224 +{
1225 +	int ret = 0, i = 0;
1226 +	mutex_lock(&void_lockdown_proc);
1227 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1228 +	if (ret)
1229 +		goto out;
1230 +	if (write) {
1231 +            printk("New set Partition : \n");
1232 +	    for(i =0;i <9;i++)
1233 +            {
1234 +                set_index[i] = 0;
1235 +                printk("set[%d] = %x \n", i, set_partition[i]);
1236 +            }
1237 +	}
1238 +out:
1239 +	mutex_unlock(&void_lockdown_proc);
1240 +	return ret;
1241 +}
1242 +
1243 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1244 +		size_t *lenp, loff_t *ppos)
1245 +{
1246 +	int ret = 0, i = 0;
1247 +	mutex_lock(&void_lockdown_proc);
1248 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1249 +	if (ret)
1250 +		goto out;
1251 +	if (write) {
1252 +	    for(i =0;i <9;i++)
1253 +            {
1254 +                bank_index[i] = 0;
1255 +            }
1256 +	}
1257 +out:
1258 +	mutex_unlock(&void_lockdown_proc);
1259 +	return ret;
1260 +}
1261 +
1262 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1263 +		size_t *lenp, loff_t *ppos)
1264 +{
1265 +	int ret = 0, i = 0;
1266 +	mutex_lock(&void_lockdown_proc);
1267 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1268 +	if (ret)
1269 +		goto out;
1270 +	if (write) {
1271 +            show_nr_pages();
1272 +	}
1273 +out:
1274 +	mutex_unlock(&void_lockdown_proc);
1275 +	return ret;
1276 +}
1277 +
1278 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1279 +		size_t *lenp, loff_t *ppos)
1280 +{
1281 +	int ret = 0, i = 0;
1282 +	mutex_lock(&void_lockdown_proc);
1283 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1284 +	if (ret)
1285 +		goto out;
1286 +	if (write) {
1287 +            do_add_pages();
1288 +	}
1289 +out:
1290 +	mutex_unlock(&void_lockdown_proc);
1291 +	return ret;
1292 +}
1293 +
1294 +static struct ctl_table cache_table[] =
1295 +{
1296 +        
1297 +	{
1298 +		.procname	= "C0_LA_set",
1299 +		.mode		= 0666,
1300 +		.proc_handler	= set_partition_handler,
1301 +		.data		= &set_partition[0],
1302 +		.maxlen		= sizeof(set_partition[0]),
1303 +		.extra1		= &set_partition_min,
1304 +		.extra2		= &set_partition_max,
1305 +	},	
1306 +	{
1307 +		.procname	= "C0_LB_set",
1308 +		.mode		= 0666,
1309 +		.proc_handler	= set_partition_handler,
1310 +		.data		= &set_partition[1],
1311 +		.maxlen		= sizeof(set_partition[1]),
1312 +		.extra1		= &set_partition_min,
1313 +		.extra2		= &set_partition_max,
1314 +	},	
1315 +	{
1316 +		.procname	= "C1_LA_set",
1317 +		.mode		= 0666,
1318 +		.proc_handler	= set_partition_handler,
1319 +		.data		= &set_partition[2],
1320 +		.maxlen		= sizeof(set_partition[2]),
1321 +		.extra1		= &set_partition_min,
1322 +		.extra2		= &set_partition_max,
1323 +	},
1324 +	{
1325 +		.procname	= "C1_LB_set",
1326 +		.mode		= 0666,
1327 +		.proc_handler	= set_partition_handler,
1328 +		.data		= &set_partition[3],
1329 +		.maxlen		= sizeof(set_partition[3]),
1330 +		.extra1		= &set_partition_min,
1331 +		.extra2		= &set_partition_max,
1332 +	},
1333 +	{
1334 +		.procname	= "C2_LA_set",
1335 +		.mode		= 0666,
1336 +		.proc_handler	= set_partition_handler,
1337 +		.data		= &set_partition[4],
1338 +		.maxlen		= sizeof(set_partition[4]),
1339 +		.extra1		= &set_partition_min,
1340 +		.extra2		= &set_partition_max,
1341 +	},
1342 +	{
1343 +		.procname	= "C2_LB_set",
1344 +		.mode		= 0666,
1345 +		.proc_handler	= set_partition_handler,
1346 +		.data		= &set_partition[5],
1347 +		.maxlen		= sizeof(set_partition[5]),
1348 +		.extra1		= &set_partition_min,
1349 +		.extra2		= &set_partition_max,
1350 +	},
1351 +	{
1352 +		.procname	= "C3_LA_set",
1353 +		.mode		= 0666,
1354 +		.proc_handler	= set_partition_handler,
1355 +		.data		= &set_partition[6],
1356 +		.maxlen		= sizeof(set_partition[6]),
1357 +		.extra1		= &set_partition_min,
1358 +		.extra2		= &set_partition_max,
1359 +	},
1360 +	{
1361 +		.procname	= "C3_LB_set",
1362 +		.mode		= 0666,
1363 +		.proc_handler	= set_partition_handler,
1364 +		.data		= &set_partition[7],
1365 +		.maxlen		= sizeof(set_partition[7]),
1366 +		.extra1		= &set_partition_min,
1367 +		.extra2		= &set_partition_max,
1368 +	},	
1369 +	{
1370 +		.procname	= "Call_LC_set",
1371 +		.mode		= 0666,
1372 +		.proc_handler	= set_partition_handler,
1373 +		.data		= &set_partition[8],
1374 +		.maxlen		= sizeof(set_partition[8]),
1375 +		.extra1		= &set_partition_min,
1376 +		.extra2		= &set_partition_max,
1377 +	},	
1378 +	{
1379 +		.procname	= "C0_LA_bank",
1380 +		.mode		= 0666,
1381 +		.proc_handler	= bank_partition_handler,
1382 +		.data		= &bank_partition[0],
1383 +		.maxlen		= sizeof(set_partition[0]),
1384 +		.extra1		= &bank_partition_min,
1385 +		.extra2		= &bank_partition_max,
1386 +	},
1387 +	{
1388 +		.procname	= "C0_LB_bank",
1389 +		.mode		= 0666,
1390 +		.proc_handler	= bank_partition_handler,
1391 +		.data		= &bank_partition[1],
1392 +		.maxlen		= sizeof(set_partition[1]),
1393 +		.extra1		= &bank_partition_min,
1394 +		.extra2		= &bank_partition_max,
1395 +	},		
1396 +	{
1397 +		.procname	= "C1_LA_bank",
1398 +		.mode		= 0666,
1399 +		.proc_handler	= bank_partition_handler,
1400 +		.data		= &bank_partition[2],
1401 +		.maxlen		= sizeof(set_partition[2]),
1402 +		.extra1		= &bank_partition_min,
1403 +		.extra2		= &bank_partition_max,
1404 +	},
1405 +	{
1406 +		.procname	= "C1_LB_bank",
1407 +		.mode		= 0666,
1408 +		.proc_handler	= bank_partition_handler,
1409 +		.data		= &bank_partition[3],
1410 +		.maxlen		= sizeof(set_partition[3]),
1411 +		.extra1		= &bank_partition_min,
1412 +		.extra2		= &bank_partition_max,
1413 +	},
1414 +	{
1415 +		.procname	= "C2_LA_bank",
1416 +		.mode		= 0666,
1417 +		.proc_handler	= bank_partition_handler,
1418 +		.data		= &bank_partition[4],
1419 +		.maxlen		= sizeof(set_partition[4]),
1420 +		.extra1		= &bank_partition_min,
1421 +		.extra2		= &bank_partition_max,
1422 +	},	
1423 +	{
1424 +		.procname	= "C2_LB_bank",
1425 +		.mode		= 0666,
1426 +		.proc_handler	= bank_partition_handler,
1427 +		.data		= &bank_partition[5],
1428 +		.maxlen		= sizeof(set_partition[5]),
1429 +		.extra1		= &bank_partition_min,
1430 +		.extra2		= &bank_partition_max,
1431 +	},		
1432 +	{
1433 +		.procname	= "C3_LA_bank",
1434 +		.mode		= 0666,
1435 +		.proc_handler	= bank_partition_handler,
1436 +		.data		= &bank_partition[6],
1437 +		.maxlen		= sizeof(set_partition[6]),
1438 +		.extra1		= &bank_partition_min,
1439 +		.extra2		= &bank_partition_max,
1440 +	},	
1441 +	{
1442 +		.procname	= "C3_LB_bank",
1443 +		.mode		= 0666,
1444 +		.proc_handler	= bank_partition_handler,
1445 +		.data		= &bank_partition[7],
1446 +		.maxlen		= sizeof(set_partition[7]),
1447 +		.extra1		= &bank_partition_min,
1448 +		.extra2		= &bank_partition_max,
1449 +	},	
1450 +	{
1451 +		.procname	= "Call_LC_bank",
1452 +		.mode		= 0666,
1453 +		.proc_handler	= bank_partition_handler,
1454 +		.data		= &bank_partition[8],
1455 +		.maxlen		= sizeof(set_partition[8]),
1456 +		.extra1		= &bank_partition_min,
1457 +		.extra2		= &bank_partition_max,
1458 +	},	
1459 +	{
1460 +		.procname	= "show_page_pool",
1461 +		.mode		= 0666,
1462 +		.proc_handler	= show_page_pool_handler,
1463 +		.data		= &show_page_pool,
1464 +		.maxlen		= sizeof(show_page_pool),
1465 +	},		{
1466 +		.procname	= "refill_page_pool",
1467 +		.mode		= 0666,
1468 +		.proc_handler	= refill_page_pool_handler,
1469 +		.data		= &refill_page_pool,
1470 +		.maxlen		= sizeof(refill_page_pool),
1471 +	},	
1472 +	{ }
1473 +};
1474 +
1475 +static struct ctl_table litmus_dir_table[] = {
1476 +	{
1477 +		.procname	= "litmus",
1478 + 		.mode		= 0555,
1479 +		.child		= cache_table,
1480 +	},
1481 +	{ }
1482 +};
1483 +
1484 +
1485 +static struct ctl_table_header *litmus_sysctls;
1486 +
1487 +
1488 +/*
1489 + * Initialize this proc interface
1490 + */
1491 +static int __init litmus_color_init(void)
1492 +{
1493 +	int err=0;
1494 +        printk("Init bankproc.c\n");
1495 +
1496 +	init_variables();
1497 +
1498 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
1499 +
1500 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
1501 +	if (!litmus_sysctls) {
1502 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
1503 +		err = -EFAULT;
1504 +		goto out;
1505 +	}
1506 +
1507 +	init_color_groups();			
1508 +	do_add_pages();
1509 +
1510 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
1511 +out:
1512 +	return err;
1513 +}
1514 +
1515 +module_init(litmus_color_init);
1516 +
1517 diff --git litmus/cache_proc.c litmus/cache_proc.c
1518 new file mode 100644
1519 index 0000000..afc0319
1520 --- /dev/null
1521 +++ litmus/cache_proc.c
1522 @@ -0,0 +1,1403 @@
1523 +#include <asm/uaccess.h>
1524 +#include <linux/uaccess.h>
1525 +#include <linux/init.h>
1526 +#include <linux/types.h>
1527 +#include <linux/kernel.h>
1528 +#include <linux/module.h>
1529 +#include <linux/sysctl.h>
1530 +#include <linux/slab.h>
1531 +#include <linux/io.h>
1532 +#include <linux/mutex.h>
1533 +#include <linux/time.h>
1534 +#include <linux/random.h>
1535 +
1536 +#include <litmus/litmus_proc.h>
1537 +#include <litmus/sched_trace.h>
1538 +#include <litmus/cache_proc.h>
1539 +#include <litmus/mc2_common.h>
1540 +#include <litmus/litmus.h>
1541 +
1542 +#include <asm/hardware/cache-l2x0.h>
1543 +#include <asm/cacheflush.h>
1544 +
1545 +
1546 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
1547 +#define LOCK_ALL        (~UNLOCK_ALL)
1548 +#define MAX_NR_WAYS	16
1549 +#define MAX_NR_COLORS	16
1550 +#define CACHELINE_SIZE 32
1551 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
1552 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
1553 +
1554 +typedef struct cacheline
1555 +{
1556 +        int line[INTS_IN_CACHELINE];
1557 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
1558 +
1559 +void mem_lock(u32 lock_val, int cpu);
1560 +
1561 +/*
1562 + * unlocked_way[i] : allocation can occur in way i
1563 + *
1564 + * 0 = allocation can occur in the corresponding way
1565 + * 1 = allocation cannot occur in the corresponding way
1566 + */
1567 +u32 unlocked_way[MAX_NR_WAYS]  = {
1568 +	0xFFFFFFFE, /* way 0 unlocked */
1569 +	0xFFFFFFFD,
1570 +	0xFFFFFFFB,
1571 +	0xFFFFFFF7,
1572 +	0xFFFFFFEF, /* way 4 unlocked */
1573 +	0xFFFFFFDF,
1574 +	0xFFFFFFBF,
1575 +	0xFFFFFF7F,
1576 +	0xFFFFFEFF, /* way 8 unlocked */
1577 +	0xFFFFFDFF,
1578 +	0xFFFFFBFF,
1579 +	0xFFFFF7FF,
1580 +	0xFFFFEFFF, /* way 12 unlocked */
1581 +	0xFFFFDFFF,
1582 +	0xFFFFBFFF,
1583 +	0xFFFF7FFF,
1584 +};
1585 +
1586 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
1587 +	0x0000FFFF, /* all ways are locked. usable = 0*/
1588 +	0x0000FFFE, /* way 0 unlocked. usable = 1 */
1589 +	0x0000FFFC,
1590 +	0x0000FFF8,
1591 +	0x0000FFF0,
1592 +	0x0000FFE0,
1593 +	0x0000FFC0,
1594 +	0x0000FF80,
1595 +	0x0000FF00,
1596 +	0x0000FE00,
1597 +	0x0000FC00,
1598 +	0x0000F800,
1599 +	0x0000F000,
1600 +	0x0000E000,
1601 +	0x0000C000,
1602 +	0x00008000,
1603 +	0x00000000, /* ways 0-15 unlocked. usable = 16 */
1604 +};
1605 +
1606 +u32 way_partition[4] = {
1607 +	0xfffffff0, /* cpu0 */
1608 +	0xffffff0f, /* cpu1 */
1609 +	0xfffff0ff, /* cpu2 */
1610 +	0xffff0fff, /* cpu3 */
1611 +};
1612 +
1613 +u32 way_partitions[9] = {
1614 +	0xffff0003, /* cpu0 A */
1615 +	0xffff0003, /* cpu0 B */
1616 +	0xffff000C, /* cpu1 A */
1617 +	0xffff000C, /* cpu1 B */
1618 +	0xffff0030, /* cpu2 A */
1619 +	0xffff0030, /* cpu2 B */
1620 +	0xffff00C0, /* cpu3 A */
1621 +	0xffff00C0, /* cpu3 B */
1622 +	0xffffff00, /* lv C */
1623 +};
1624 +
1625 +u32 prev_lockdown_d_reg[5] = {
1626 +	0x0000FF00,
1627 +	0x0000FF00,
1628 +	0x0000FF00,
1629 +	0x0000FF00,
1630 +	0x000000FF, /* share with level-C */
1631 +};
1632 +
1633 +u32 prev_lockdown_i_reg[5] = {
1634 +	0x0000FF00,
1635 +	0x0000FF00,
1636 +	0x0000FF00,
1637 +	0x0000FF00,
1638 +	0x000000FF, /* share with level-C */
1639 +};
1640 +
1641 +u32 prev_lbm_i_reg[8] = {
1642 +	0x00000000,
1643 +	0x00000000,
1644 +	0x00000000,
1645 +	0x00000000,
1646 +	0x00000000,
1647 +	0x00000000,
1648 +	0x00000000,
1649 +	0x00000000,
1650 +};
1651 +
1652 +u32 prev_lbm_d_reg[8] = {
1653 +	0x00000000,
1654 +	0x00000000,
1655 +	0x00000000,
1656 +	0x00000000,
1657 +	0x00000000,
1658 +	0x00000000,
1659 +	0x00000000,
1660 +	0x00000000,
1661 +};
1662 +
1663 +static void __iomem *cache_base;
1664 +static void __iomem *lockreg_d;
1665 +static void __iomem *lockreg_i;
1666 +
1667 +static u32 cache_id;
1668 +
1669 +struct mutex actlr_mutex;
1670 +struct mutex l2x0_prefetch_mutex;
1671 +struct mutex lockdown_proc;
1672 +static u32 way_partition_min;
1673 +static u32 way_partition_max;
1674 +
1675 +static int zero = 0;
1676 +static int one = 1;
1677 +
1678 +static int l1_prefetch_proc;
1679 +static int l2_prefetch_hint_proc;
1680 +static int l2_double_linefill_proc;
1681 +static int l2_data_prefetch_proc;
1682 +static int os_isolation;
1683 +static int use_part;
1684 +
1685 +u32 lockdown_reg[9] = {
1686 +	0x00000000,
1687 +	0x00000000,
1688 +	0x00000000,
1689 +	0x00000000,
1690 +	0x00000000,
1691 +	0x00000000,
1692 +	0x00000000,
1693 +	0x00000000,
1694 +};
1695 +	
1696 +
1697 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
1698 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
1699 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1700 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
1701 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
1702 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1703 +
1704 +int lock_all;
1705 +int nr_lockregs;
1706 +static raw_spinlock_t cache_lock;
1707 +static raw_spinlock_t prefetch_lock;
1708 +static void ***flusher_pages = NULL;
1709 +
1710 +extern void l2c310_flush_all(void);
1711 +
1712 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
1713 +{
1714 +	/* wait for cache operation by line or way to complete */
1715 +	while (readl_relaxed(reg) & mask)
1716 +		cpu_relax();
1717 +}
1718 +
1719 +#ifdef CONFIG_CACHE_L2X0 
1720 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
1721 +{
1722 +	/* cache operations by line are atomic on PL310 */
1723 +}
1724 +#else
1725 +#define cache_wait	cache_wait_way
1726 +#endif
1727 +
1728 +static inline void cache_sync(void)
1729 +{
1730 +	void __iomem *base = cache_base;
1731 +
1732 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
1733 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
1734 +}
1735 +
1736 +static void print_lockdown_registers(int cpu)
1737 +{
1738 +	int i;
1739 +	//for (i = 0; i < nr_lockregs; i++) {
1740 +	for (i = 0; i < 4; i++) {
1741 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
1742 +				i, readl_relaxed(ld_d_reg(i)));
1743 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
1744 +				i, readl_relaxed(ld_i_reg(i)));
1745 +	}
1746 +}
1747 +
1748 +static void test_lockdown(void *ignore)
1749 +{
1750 +	int i, cpu;
1751 +
1752 +	cpu = smp_processor_id();
1753 +	printk("Start lockdown test on CPU %d.\n", cpu);
1754 +
1755 +	for (i = 0; i < nr_lockregs; i++) {
1756 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
1757 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
1758 +	}
1759 +
1760 +	printk("Lockdown initial state:\n");
1761 +	print_lockdown_registers(cpu);
1762 +	printk("---\n");
1763 +
1764 +	for (i = 0; i < nr_lockregs; i++) {
1765 +		writel_relaxed(1, ld_d_reg(i));
1766 +		writel_relaxed(2, ld_i_reg(i));
1767 +	}
1768 +	printk("Lockdown all data=1 instr=2:\n");
1769 +	print_lockdown_registers(cpu);
1770 +	printk("---\n");
1771 +
1772 +	for (i = 0; i < nr_lockregs; i++) {
1773 +		writel_relaxed((1 << i), ld_d_reg(i));
1774 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
1775 +	}
1776 +	printk("Lockdown varies:\n");
1777 +	print_lockdown_registers(cpu);
1778 +	printk("---\n");
1779 +
1780 +	for (i = 0; i < nr_lockregs; i++) {
1781 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
1782 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
1783 +	}
1784 +	printk("Lockdown all zero:\n");
1785 +	print_lockdown_registers(cpu);
1786 +
1787 +	printk("End lockdown test.\n");
1788 +}
1789 +
1790 +void litmus_setup_lockdown(void __iomem *base, u32 id)
1791 +{
1792 +	cache_base = base;
1793 +	cache_id = id;
1794 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
1795 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
1796 +    
1797 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
1798 +		nr_lockregs = 8;
1799 +	} else {
1800 +		printk(KERN_WARNING "Unknown cache ID!\n");
1801 +		nr_lockregs = 1;
1802 +	}
1803 +	
1804 +	mutex_init(&actlr_mutex);
1805 +	mutex_init(&l2x0_prefetch_mutex);
1806 +	mutex_init(&lockdown_proc);
1807 +	raw_spin_lock_init(&cache_lock);
1808 +	raw_spin_lock_init(&prefetch_lock);
1809 +	
1810 +	test_lockdown(NULL);
1811 +}
1812 +
1813 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1814 +		size_t *lenp, loff_t *ppos)
1815 +{
1816 +	int ret = 0, i;
1817 +	unsigned long flags;
1818 +	
1819 +	mutex_lock(&lockdown_proc);
1820 +	
1821 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1822 +	if (ret)
1823 +		goto out;
1824 +	
1825 +	if (write) {
1826 +		printk("Way-partition settings:\n");
1827 +		for (i = 0; i < 9; i++) {
1828 +			printk("0x%08X\n", way_partitions[i]);
1829 +		}
1830 +		for (i = 0; i < 4; i++) {
1831 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1832 +				       i * L2X0_LOCKDOWN_STRIDE);
1833 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1834 +				       i * L2X0_LOCKDOWN_STRIDE);
1835 +		}
1836 +	}
1837 +	
1838 +	local_irq_save(flags);
1839 +	print_lockdown_registers(smp_processor_id());
1840 +	l2c310_flush_all();
1841 +	local_irq_restore(flags);
1842 +out:
1843 +	mutex_unlock(&lockdown_proc);
1844 +	return ret;
1845 +}
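     +/* Each C<cpu>_L<level>_way entry below holds a bitmask of the ways
     + * that CPU/criticality level may allocate into (bit set = way usable);
     + * the handler programs the complement, since PL310 lockdown bits mark
     + * prohibited ways.  Minimal usage sketch, assuming a 16-way L2:
     + *
     + *   echo 3 > /proc/sys/litmus/C0_LA_way    # CPU0 level A: ways 0-1
     + *
     + * A write re-applies only the even-indexed (level-A) masks here;
     + * level-B masks take effect when do_partition() runs at scheduling
     + * time. */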
1846 +
1847 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
1848 +		size_t *lenp, loff_t *ppos)
1849 +{
1850 +	int ret = 0, i;
1851 +	unsigned long flags;
1852 +	
1853 +	mutex_lock(&lockdown_proc);
1854 +	
1855 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1856 +	if (ret)
1857 +		goto out;
1858 +	
1859 +	if (write && lock_all == 1) {
1860 +		for (i = 0; i < nr_lockregs; i++) {
1861 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1862 +				       i * L2X0_LOCKDOWN_STRIDE);
1863 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1864 +				       i * L2X0_LOCKDOWN_STRIDE);
1865 +		}
1866 +/*		
1867 +		for (i = 0; i < nr_lockregs;  i++) {
1868 +			barrier();
1869 +			mem_lock(LOCK_ALL, i);
1870 +			barrier();
1871 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
1872 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
1873 +		}
1874 +*/		
1875 +	}
1876 +	if (write && lock_all == 0) {
1877 +		for (i = 0; i < nr_lockregs; i++) {
1878 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1879 +				       i * L2X0_LOCKDOWN_STRIDE);
1880 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1881 +				       i * L2X0_LOCKDOWN_STRIDE);
1882 +		}
1883 +/*
1884 +		for (i = 0; i < nr_lockregs;  i++) {
1885 +			barrier();
1886 +			mem_lock(UNLOCK_ALL, i);
1887 +			barrier();
1888 +			//writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
1889 +			//writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
1890 +		}
1891 +*/
1892 +	}
1893 +	printk("LOCK_ALL HANDLER\n");
1894 +	local_irq_save(flags);
1895 +	print_lockdown_registers(smp_processor_id());
1896 +	l2c310_flush_all();
1897 +	local_irq_restore(flags);
1898 +out:
1899 +	mutex_unlock(&lockdown_proc);
1900 +	return ret;
1901 +}
1902 +
1903 +void cache_lockdown(u32 lock_val, int cpu)
1904 +{
1905 +	//unsigned long flags;
1906 +	//raw_spin_lock_irqsave(&cache_lock, flags);
1907 +
1908 +	__asm__ __volatile__ (
1909 +"	str	%[lockval], [%[dcachereg]]\n"
1910 +"	str	%[lockval], [%[icachereg]]\n"
1911 +	: 
1912 +	: [dcachereg] "r" (ld_d_reg(cpu)),
1913 +	  [icachereg] "r" (ld_i_reg(cpu)),
1914 +	  [lockval] "r" (lock_val)
1915 +	: "cc");
1916 +
1917 +	//raw_spin_unlock_irqrestore(&cache_lock, flags);
1918 +}
1919 +
1920 +void do_partition(enum crit_level lv, int cpu)
1921 +{
1922 +	u32 regs;
1923 +	unsigned long flags;
1924 +	
1925 +	if (lock_all || !use_part)
1926 +		return;
1927 +	raw_spin_lock_irqsave(&cache_lock, flags);
1928 +	switch(lv) {
1929 +		case CRIT_LEVEL_A:
1930 +			regs = ~way_partitions[cpu*2];
1931 +			regs &= 0x0000ffff;
1932 +			break;
1933 +		case CRIT_LEVEL_B:
1934 +			regs = ~way_partitions[cpu*2+1];
1935 +			regs &= 0x0000ffff;
1936 +			break;
1937 +		case CRIT_LEVEL_C:
1938 +		case NUM_CRIT_LEVELS:
1939 +			regs = ~way_partitions[8];
1940 +			regs &= 0x0000ffff;
1941 +			break;
1942 +		default:
1943 +			BUG();
1944 +
1945 +	}
1946 +	barrier();
1947 +	//cache_lockdown(regs, cpu);
1948 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
1949 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
1950 +	barrier();
1951 +
1952 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
1953 +	
1954 +	flush_cache(0);
1955 +}
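     +/* Worked example, assuming a 16-way cache: with way_partitions[0] ==
     + * 0x0003 (CPU0, level A may use ways 0 and 1), a level-A task on CPU0
     + * gets regs = ~0x0003 & 0xffff = 0xfffc, locking it out of ways 2-15.
     + * Level-C tasks on all CPUs share the mask in way_partitions[8]. */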
1956 +
1957 +void lock_cache(int cpu, u32 val)
1958 +{
1959 +	unsigned long flags;
1960 +	
1961 +	local_irq_save(flags);
1962 +	if (val != 0xffffffff) {
1963 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1964 +					   cpu * L2X0_LOCKDOWN_STRIDE);
1965 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1966 +					   cpu * L2X0_LOCKDOWN_STRIDE);
1967 +	}
1968 +	else {
1969 +		int i;
1970 +		for (i = 0; i < 4; i++)
1971 +			do_partition(CRIT_LEVEL_A, i);
1972 +	}
1973 +	local_irq_restore(flags);
1974 +}
1975 +
1976 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
1977 +		size_t *lenp, loff_t *ppos)
1978 +{
1979 +	int ret = 0;
1980 +	
1981 +	mutex_lock(&lockdown_proc);
1982 +
1983 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1984 +	if (ret)
1985 +		goto out;
1986 +	
1987 +
1988 +	printk("USE_PART HANDLER = %d\n", use_part);
1989 +
1990 +out:
1991 +	mutex_unlock(&lockdown_proc);
1992 +	return ret;
1993 +}
1994 +
1995 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
1996 +		size_t *lenp, loff_t *ppos)
1997 +{
1998 +	int ret = 0;
1999 +	
2000 +	mutex_lock(&lockdown_proc);
2001 +	
2002 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2003 +	if (ret)
2004 +		goto out;
2005 +	
2006 +
2007 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
2008 +
2009 +out:
2010 +	mutex_unlock(&lockdown_proc);
2011 +	return ret;
2012 +}
2013 +
2014 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
2015 +		size_t *lenp, loff_t *ppos)
2016 +{
2017 +	int ret = 0, i;
2018 +	
2019 +	mutex_lock(&lockdown_proc);
2020 +	
2021 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2022 +	if (ret)
2023 +		goto out;
2024 +	
2025 +	if (write) {
2026 +		for (i = 0; i < nr_lockregs; i++) {
2027 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2028 +				       i * L2X0_LOCKDOWN_STRIDE);
2029 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2030 +				       i * L2X0_LOCKDOWN_STRIDE);
2031 +		}
2032 +	}
2033 +
2034 +out:
2035 +	mutex_unlock(&lockdown_proc);
2036 +	return ret;
2037 +}
2038 +
2039 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
2040 +		size_t *lenp, loff_t *ppos)
2041 +{
2042 +	int ret = 0, i;
2043 +	
2044 +	mutex_lock(&lockdown_proc);
2045 +	
2046 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2047 +	if (ret)
2048 +		goto out;
2049 +	
2050 +	if (write) {
2051 +		for (i = 0; i < nr_lockregs; i++) {
2052 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2053 +				       i * L2X0_LOCKDOWN_STRIDE);
2054 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2055 +				       i * L2X0_LOCKDOWN_STRIDE);
2056 +		}
2057 +	}
2058 +
2059 +out:
2060 +	mutex_unlock(&lockdown_proc);
2061 +	return ret;
2062 +}
2063 +
2064 +inline void enter_irq_mode(void)
2065 +{
2066 +	int cpu = smp_processor_id();
2067 +
2068 +	if (os_isolation == 0)
2069 +		return;	
2070 +
2071 +	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2072 +	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2073 +	
2074 +	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
2075 +	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
2076 +}
2077 +
2078 +inline void exit_irq_mode(void)
2079 +{
2080 +	int cpu = smp_processor_id();
2081 +
2082 +	if (os_isolation == 0)
2083 +		return;
2084 +	writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
2085 +	writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));	
2086 +}
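     +/* OS isolation: while enabled, interrupt handlers run under the shared
     + * level-C/OS mask in way_partitions[8], so IRQ-mode allocations cannot
     + * evict lines belonging to level-A/B partitions.  enter_irq_mode()
     + * saves the interrupted task's lockdown state and exit_irq_mode()
     + * restores it. */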
2087 +
2088 +/* Operate on the Cortex-A9's ACTLR register */
2089 +#define ACTLR_L2_PREFETCH_HINT	(1 << 1)
2090 +#define ACTLR_L1_PREFETCH	(1 << 2)
2091 +
2092 +/*
2093 + * Change the ACTLR.
2094 + * @mode	- If 1 (0), set (clear) the bit given in @mask in the ACTLR.
2095 + * @mask	- A mask in which one bit is set to operate on the ACTLR.
2096 + */
2097 +static void actlr_change(int mode, int mask)
2098 +{
2099 +	u32 orig_value, new_value, reread_value;
2100 +
2101 +	if (0 != mode && 1 != mode) {
2102 +		printk(KERN_WARNING "%s: mode must be 0 or 1.\n",
2103 +				__func__);
2104 +		return;
2105 +	}
2106 +
2107 +	/* get the original value */
2108 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
2109 +
2110 +	if (0 == mode)
2111 +		new_value = orig_value & ~(mask);
2112 +	else
2113 +		new_value = orig_value | mask;
2114 +
2115 +	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
2116 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
2117 +
2118 +	printk("ACTLR: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2119 +			orig_value, new_value, reread_value);
2120 +}
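     +/* Note that the ACTLR is a per-core register, so a write through the
     + * l1_prefetch/l2_prefetch_hint proc entries below only affects the CPU
     + * that happens to execute the handler; presumably the test setup pins
     + * the writer to each CPU of interest in turn. */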
2121 +
2122 +int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
2123 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2124 +{
2125 +	int ret, mode;
2126 +
2127 +	mutex_lock(&actlr_mutex);
2128 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2129 +
2130 +	if (!ret && write) {
2131 +		mode = *((int*)table->data);
2132 +		actlr_change(mode, ACTLR_L1_PREFETCH);
2133 +	}
2134 +	mutex_unlock(&actlr_mutex);
2135 +
2136 +	return ret;
2137 +}
2138 +
2139 +int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
2140 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2141 +{
2142 +	int ret, mode;
2143 +
2144 +	mutex_lock(&actlr_mutex);
2145 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2146 +	if (!ret && write) {
2147 +		mode = *((int*)table->data);
2148 +		actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
2149 +	}
2150 +	mutex_unlock(&actlr_mutex);
2151 +
2152 +	return ret;
2153 +}
2154 +
2155 +
2156 +/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
2157 +#define L2X0_PREFETCH_DOUBLE_LINEFILL	(1 << 30)
2158 +#define L2X0_PREFETCH_INST_PREFETCH	(1 << 29)
2159 +#define L2X0_PREFETCH_DATA_PREFETCH	(1 << 28)
2160 +static void l2x0_prefetch_change(int mode, int mask)
2161 +{
2162 +	u32 orig_value, new_value, reread_value;
2163 +
2164 +	if (0 != mode && 1 != mode) {
2165 +		printk(KERN_WARNING "%s: mode must be 0 or 1.\n",
2166 +				__func__);
2167 +		return;
2168 +	}
2169 +
2170 +	orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2171 +
2172 +	if (0 == mode)
2173 +		new_value = orig_value & ~(mask);
2174 +	else
2175 +		new_value = orig_value | mask;
2176 +
2177 +	writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
2178 +	reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2179 +
2180 +	printk("l2x0 prefetch: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2181 +			orig_value, new_value, reread_value);
2182 +}
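     +/* Unlike the per-core ACTLR above, L310_PREFETCH_CTRL lives in the
     + * shared L2 controller, so a single write here affects all CPUs. */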
2183 +
2184 +int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
2185 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2186 +{
2187 +	int ret, mode;
2188 +
2189 +	mutex_lock(&l2x0_prefetch_mutex);
2190 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2191 +	if (!ret && write) {
2192 +		mode = *((int*)table->data);
2193 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
2194 +	}
2195 +	mutex_unlock(&l2x0_prefetch_mutex);
2196 +
2197 +	return ret;
2198 +}
2199 +
2200 +int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
2201 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2202 +{
2203 +	int ret, mode;
2204 +
2205 +	mutex_lock(&l2x0_prefetch_mutex);
2206 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2207 +	if (!ret && write) {
2208 +		mode = *((int*)table->data);
2209 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
2210 +	}
2211 +	mutex_unlock(&l2x0_prefetch_mutex);
2212 +
2213 +	return ret;
2214 +}
2215 +
2216 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2217 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2218 +
2219 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2220 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2221 +		
2222 +static struct ctl_table cache_table[] =
2223 +{
2224 +	{
2225 +		.procname	= "C0_LA_way",
2226 +		.mode		= 0666,
2227 +		.proc_handler	= way_partition_handler,
2228 +		.data		= &way_partitions[0],
2229 +		.maxlen		= sizeof(way_partitions[0]),
2230 +		.extra1		= &way_partition_min,
2231 +		.extra2		= &way_partition_max,
2232 +	},	
2233 +	{
2234 +		.procname	= "C0_LB_way",
2235 +		.mode		= 0666,
2236 +		.proc_handler	= way_partition_handler,
2237 +		.data		= &way_partitions[1],
2238 +		.maxlen		= sizeof(way_partitions[1]),
2239 +		.extra1		= &way_partition_min,
2240 +		.extra2		= &way_partition_max,
2241 +	},	
2242 +	{
2243 +		.procname	= "C1_LA_way",
2244 +		.mode		= 0666,
2245 +		.proc_handler	= way_partition_handler,
2246 +		.data		= &way_partitions[2],
2247 +		.maxlen		= sizeof(way_partitions[2]),
2248 +		.extra1		= &way_partition_min,
2249 +		.extra2		= &way_partition_max,
2250 +	},
2251 +	{
2252 +		.procname	= "C1_LB_way",
2253 +		.mode		= 0666,
2254 +		.proc_handler	= way_partition_handler,
2255 +		.data		= &way_partitions[3],
2256 +		.maxlen		= sizeof(way_partitions[3]),
2257 +		.extra1		= &way_partition_min,
2258 +		.extra2		= &way_partition_max,
2259 +	},
2260 +	{
2261 +		.procname	= "C2_LA_way",
2262 +		.mode		= 0666,
2263 +		.proc_handler	= way_partition_handler,
2264 +		.data		= &way_partitions[4],
2265 +		.maxlen		= sizeof(way_partitions[4]),
2266 +		.extra1		= &way_partition_min,
2267 +		.extra2		= &way_partition_max,
2268 +	},
2269 +	{
2270 +		.procname	= "C2_LB_way",
2271 +		.mode		= 0666,
2272 +		.proc_handler	= way_partition_handler,
2273 +		.data		= &way_partitions[5],
2274 +		.maxlen		= sizeof(way_partitions[5]),
2275 +		.extra1		= &way_partition_min,
2276 +		.extra2		= &way_partition_max,
2277 +	},
2278 +	{
2279 +		.procname	= "C3_LA_way",
2280 +		.mode		= 0666,
2281 +		.proc_handler	= way_partition_handler,
2282 +		.data		= &way_partitions[6],
2283 +		.maxlen		= sizeof(way_partitions[6]),
2284 +		.extra1		= &way_partition_min,
2285 +		.extra2		= &way_partition_max,
2286 +	},
2287 +	{
2288 +		.procname	= "C3_LB_way",
2289 +		.mode		= 0666,
2290 +		.proc_handler	= way_partition_handler,
2291 +		.data		= &way_partitions[7],
2292 +		.maxlen		= sizeof(way_partitions[7]),
2293 +		.extra1		= &way_partition_min,
2294 +		.extra2		= &way_partition_max,
2295 +	},	
2296 +	{
2297 +		.procname	= "Call_LC_way",
2298 +		.mode		= 0666,
2299 +		.proc_handler	= way_partition_handler,
2300 +		.data		= &way_partitions[8],
2301 +		.maxlen		= sizeof(way_partitions[8]),
2302 +		.extra1		= &way_partition_min,
2303 +		.extra2		= &way_partition_max,
2304 +	},		
2305 +	{
2306 +		.procname	= "lock_all",
2307 +		.mode		= 0666,
2308 +		.proc_handler	= lock_all_handler,
2309 +		.data		= &lock_all,
2310 +		.maxlen		= sizeof(lock_all),
2311 +		.extra1		= &zero,
2312 +		.extra2		= &one,
2313 +	},
2314 +	{
2315 +		.procname	= "l1_prefetch",
2316 +		.mode		= 0644,
2317 +		.proc_handler	= litmus_l1_prefetch_proc_handler,
2318 +		.data		= &l1_prefetch_proc,
2319 +		.maxlen		= sizeof(l1_prefetch_proc),
2320 +	},
2321 +	{
2322 +		.procname	= "l2_prefetch_hint",
2323 +		.mode		= 0644,
2324 +		.proc_handler	= litmus_l2_prefetch_hint_proc_handler,
2325 +		.data		= &l2_prefetch_hint_proc,
2326 +		.maxlen		= sizeof(l2_prefetch_hint_proc),
2327 +	},
2328 +	{
2329 +		.procname	= "l2_double_linefill",
2330 +		.mode		= 0644,
2331 +		.proc_handler	= litmus_l2_double_linefill_proc_handler,
2332 +		.data		= &l2_double_linefill_proc,
2333 +		.maxlen		= sizeof(l2_double_linefill_proc),
2334 +	},
2335 +	{
2336 +		.procname	= "l2_data_prefetch",
2337 +		.mode		= 0644,
2338 +		.proc_handler	= litmus_l2_data_prefetch_proc_handler,
2339 +		.data		= &l2_data_prefetch_proc,
2340 +		.maxlen		= sizeof(l2_data_prefetch_proc),
2341 +	},
2342 +	{
2343 +		.procname	= "os_isolation",
2344 +		.mode		= 0644,
2345 +		.proc_handler	= os_isolation_proc_handler,
2346 +		.data		= &os_isolation,
2347 +		.maxlen		= sizeof(os_isolation),
2348 +	},
2349 +	{
2350 +		.procname	= "use_part",
2351 +		.mode		= 0644,
2352 +		.proc_handler	= use_part_proc_handler,
2353 +		.data		= &use_part,
2354 +		.maxlen		= sizeof(use_part),
2355 +	},
2356 +	{
2357 +		.procname	= "do_perf_test",
2358 +		.mode		= 0644,
2359 +		.proc_handler	= do_perf_test_proc_handler,
2360 +	},
2361 +	{
2362 +		.procname	= "setup_flusher",
2363 +		.mode		= 0644,
2364 +		.proc_handler	= setup_flusher_proc_handler,
2365 +	},
2366 +	{
2367 +		.procname	= "lockdown_reg_0",
2368 +		.mode		= 0644,
2369 +		.proc_handler	= lockdown_reg_handler,
2370 +		.data		= &lockdown_reg[0],
2371 +		.maxlen		= sizeof(lockdown_reg[0]),
2372 +		.extra1		= &way_partition_min,
2373 +		.extra2		= &way_partition_max,
2374 +	},
2375 +	{
2376 +		.procname	= "lockdown_reg_1",
2377 +		.mode		= 0644,
2378 +		.proc_handler	= lockdown_reg_handler,
2379 +		.data		= &lockdown_reg[1],
2380 +		.maxlen		= sizeof(lockdown_reg[1]),
2381 +		.extra1		= &way_partition_min,
2382 +		.extra2		= &way_partition_max,
2383 +	},
2384 +	{
2385 +		.procname	= "lockdown_reg_2",
2386 +		.mode		= 0644,
2387 +		.proc_handler	= lockdown_reg_handler,
2388 +		.data		= &lockdown_reg[2],
2389 +		.maxlen		= sizeof(lockdown_reg[2]),
2390 +		.extra1		= &way_partition_min,
2391 +		.extra2		= &way_partition_max,
2392 +	},
2393 +	{
2394 +		.procname	= "lockdown_reg_3",
2395 +		.mode		= 0644,
2396 +		.proc_handler	= lockdown_reg_handler,
2397 +		.data		= &lockdown_reg[3],
2398 +		.maxlen		= sizeof(lockdown_reg[3]),
2399 +		.extra1		= &way_partition_min,
2400 +		.extra2		= &way_partition_max,
2401 +	},
2402 +	{
2403 +		.procname	= "lockdown_regs",
2404 +		.mode		= 0644,
2405 +		.proc_handler	= lockdown_global_handler,
2406 +		.data		= &lockdown_reg[8],
2407 +		.maxlen		= sizeof(lockdown_reg[8]),
2408 +		.extra1		= &way_partition_min,
2409 +		.extra2		= &way_partition_max,
2410 +	},
2411 +	{ }
2412 +};
2413 +
2414 +static struct ctl_table litmus_dir_table[] = {
2415 +	{
2416 +		.procname	= "litmus",
2417 +		.mode		= 0555,
2418 +		.child		= cache_table,
2419 +	},
2420 +	{ }
2421 +};
2422 +
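     +/*
     + * Read one word per 32-byte cache line in [start, end) while the data
     + * lockdown register holds @lock_val, so the fetched lines allocate
     + * only into the ways that @lock_val leaves unlocked.  Unlike
     + * color_read_in_mem_lock() below, the register is left at @lock_val
     + * on return; @unlock_val is accepted but unused here.
     + */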
2423 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
2424 +{
2425 +	u32 v = 0;
2426 +
2427 +	__asm__ __volatile__ (
2428 +"	.align 5\n"
2429 +"	str	%[lockval], [%[cachereg]]\n"
2430 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2431 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2432 +"	bgt	1b\n				@ read more, if necessary\n"
2433 +	: [addr] "+r" (start),
2434 +	  [val] "+r" (v)
2435 +	: [end] "r" (end),
2436 +#ifdef CONFIG_CACHE_L2X0
2437 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2438 +#else
2439 +	  [cachereg] "r" (lockreg_d),
2440 +#endif
2441 +	  [lockval] "r" (lock_val)
2442 +	: "cc");
2443 +
2444 +	return v;
2445 +}
2446 +
2447 +
2448 +/*
2449 + * Prefetch by reading the first word of each cache line in a range,
2450 + * holding the data lockdown register at @lock_val during the reads.
2451 + *
2452 + * @lock_val: lockdown value in force while prefetching
2453 + * @unlock_val: lockdown value written back on completion
2454 + * @start: start address to be prefetched
2455 + * @end: end address to prefetch (exclusive)
2456 + *
2457 + * Assumes: start < end
2458 + */
2459 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
2460 +{
2461 +#ifndef CONFIG_CACHE_L2X0
2462 +	unsigned long flags;
2463 +#endif
2464 +	u32 v = 0;
2465 +
2466 +#ifndef CONFIG_CACHE_L2X0
2467 +	raw_spin_lock_irqsave(&prefetch_lock, flags);
2468 +#endif
2469 +
2470 +	__asm__ __volatile__ (
2471 +"	.align 5\n"
2472 +"	str	%[lockval], [%[cachereg]]\n"
2473 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2474 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2475 +"	bgt	1b\n				@ read more, if necessary\n"
2476 +"	str	%[unlockval], [%[cachereg]]\n"
2477 +	: [addr] "+r" (start),
2478 +	  [val] "+r" (v)
2479 +	: [end] "r" (end),
2480 +#ifdef CONFIG_CACHE_L2X0
2481 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2482 +#else
2483 +	  [cachereg] "r" (lockreg_d),
2484 +#endif
2485 +	  [lockval] "r" (lock_val),
2486 +	  [unlockval] "r" (unlock_val)
2487 +	: "cc");
2488 +
2489 +#ifndef CONFIG_CACHE_L2X0
2490 +	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
2491 +#endif
2492 +
2493 +	return v;
2494 +}
2495 +
2496 +static long update_timeval(struct timespec lhs, struct timespec rhs)
2497 +{
2498 +	long val;
2499 +	struct timespec ts;
2500 +
2501 +	ts = timespec_sub(rhs, lhs);
2502 +	val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
2503 +
2504 +	return val;
2505 +}
2506 +
2507 +extern void v7_flush_kern_dcache_area(void *, size_t);
2508 +extern void v7_flush_kern_cache_all(void);
2509 +/*
2510 + * Ensure that this page is not in the L1 or L2 cache.
2511 + * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
2512 + * kernel or user vaddr.
2513 + */
2514 +void color_flush_page(void *vaddr, size_t size)
2515 +{
2516 +	v7_flush_kern_dcache_area(vaddr, size);
2517 +	//v7_flush_kern_cache_all();
2518 +}
2519 +
2520 +extern struct page* get_colored_page(unsigned long color);
2521 +
2522 +int setup_flusher_array(void)
2523 +{
2524 +	int color, way, ret = 0;
2525 +	struct page *page;
2526 +
2527 +	if (flusher_pages != NULL)
2528 +		goto out;
2529 +
2530 +	flusher_pages = (void***) kmalloc(MAX_NR_WAYS
2531 +			* sizeof(*flusher_pages), GFP_KERNEL);
2532 +	if (!flusher_pages) {
2533 +		printk(KERN_WARNING "No memory for flusher array!\n");
2534 +		ret = -EINVAL;
2535 +		goto out;
2536 +	}
2537 +
2538 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2539 +		void **flusher_color_arr;
2540 +		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
2541 +				* MAX_NR_COLORS, GFP_KERNEL);
2542 +		if (!flusher_color_arr) {
2543 +			printk(KERN_WARNING "No memory for flusher array!\n");
2544 +			ret = -ENOMEM;
2545 +			goto out_free;
2546 +		}
2547 +
2548 +		flusher_pages[way] = flusher_color_arr;
2549 +
2550 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2551 +			int node;
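     +			/* Map each of the 16 page colors to an index into the
     +			 * colored-page allocator; the constants depend on
     +			 * get_colored_page()'s pool layout, defined elsewhere
     +			 * in this patch.  Assumes MAX_NR_COLORS <= 16, as node
     +			 * would otherwise be used uninitialized. */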
2552 +			switch (color) {
2553 +				case 0:
2554 +					node = 48;
2555 +					break;
2556 +				case 1:
2557 +					node = 49;
2558 +					break;
2559 +				case 2:
2560 +					node = 50;
2561 +					break;
2562 +				case 3:
2563 +					node = 51;
2564 +					break;
2565 +				case 4:
2566 +					node = 68;
2567 +					break;
2568 +				case 5:
2569 +					node = 69;
2570 +					break;
2571 +				case 6:
2572 +					node = 86;
2573 +					break;
2574 +				case 7:
2575 +					node = 87;
2576 +					break;
2577 +				case 8:
2578 +					node = 88;
2579 +					break;
2580 +				case 9:
2581 +					node = 105;
2582 +					break;
2583 +				case 10:
2584 +					node = 106;
2585 +					break;
2586 +				case 11:
2587 +					node = 107;
2588 +					break;
2589 +				case 12:
2590 +					node = 108;
2591 +					break;
2592 +				case 13:
2593 +					node = 125;
2594 +					break;
2595 +				case 14:
2596 +					node = 126;
2597 +					break;
2598 +				case 15:
2599 +					node = 127;
2600 +					break;
2601 +			}
2602 +			page = get_colored_page(node);
2603 +			if (!page) {
2604 +				printk(KERN_WARNING "no more colored pages\n");
2605 +				ret = -EINVAL;
2606 +				goto out_free;
2607 +			}
2608 +			flusher_pages[way][color] = page_address(page);
2609 +			if (!flusher_pages[way][color]) {
2610 +				printk(KERN_WARNING "bad page address\n");
2611 +				ret = -EINVAL;
2612 +				goto out_free;
2613 +			}
2614 +		}
2615 +	}
2616 +out:
2617 +	return ret;
2618 +out_free:
2619 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2620 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2621 +			/* not bothering to try and give back colored pages */
2622 +		}
2623 +		kfree(flusher_pages[way]);
2624 +	}
2625 +	kfree(flusher_pages);
2626 +	flusher_pages = NULL;
2627 +	return ret;
2628 +}
2629 +
2630 +void flush_cache(int all)
2631 +{
2632 +	int way, color, cpu;
2633 +	unsigned long flags;
2634 +	
2635 +	raw_spin_lock_irqsave(&cache_lock, flags);
2636 +	cpu = raw_smp_processor_id();
2637 +	
2638 +	prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2639 +	prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2640 +	for (way=0;way<MAX_NR_WAYS;way++) {
2641 +		if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
2642 +			!all)
2643 +			continue;
2644 +		for (color=0;color<MAX_NR_COLORS;color++) {
2645 +			void *vaddr = flusher_pages[way][color];
2646 +			u32 lvalue  = unlocked_way[way];
2647 +			color_read_in_mem_lock(lvalue, LOCK_ALL,
2648 +					       vaddr, vaddr + PAGE_SIZE);
2649 +		}
2650 +
2651 +	}
2652 +
2653 +	writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
2654 +	writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
2655 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2656 +}
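     +/* flush_cache() evicts by displacement rather than by maintenance ops:
     + * with a single way unlocked, reading one page of every color replaces
     + * that way's entire contents (presumably 16 colors x 4 KiB spans a
     + * way), writing dirty lines back to memory.  Unless @all is set, ways
     + * the current CPU already has locked down are skipped. */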
2657 +
2658 +/* src = shared, dst = local */
2659 +#if 1 // random
2660 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2661 +{
2662 +	/* size is in KB */
2663 +	long ret = 0;
2664 +	lt_t t1, t2;
2665 +	int numlines = size * CACHELINES_IN_1KB;
2666 +	int next, sum = 0, ran;
2667 +	unsigned long flags;
2668 +	
2669 +	get_random_bytes(&ran, sizeof(int));
2670 +	next = ran % ((size*1024)/sizeof(cacheline_t));
2671 +	
2672 +	//preempt_disable();
2673 +	if (type == 1) {
2674 +		int i, j;
2675 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2676 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2677 +		
2678 +		local_irq_save(flags);
2679 +		t1 = litmus_clock();
2680 +		for (i = 0; i < numlines; i++) {
2681 +			next = src[next].line[0];
2682 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2683 +				//dst[next].line[j] = src[next].line[j]; // read
2684 +				src[next].line[j] = dst[next].line[j]; // write
2685 +			}
2686 +		}
2687 +		t2 = litmus_clock();
2688 +		local_irq_restore(flags);
2689 +		sum = next + (int)t2;
2690 +		t2 -= t1;
2691 +		ret = put_user(t2, ts);
2692 +	}
2693 +	else {
2694 +		int i, j;
2695 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2696 +		local_irq_save(flags);
2697 +		t1 = litmus_clock();
2698 +		for (i = 0; i < numlines; i++) {
2699 +			next = src[next].line[0];
2700 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2701 +				//dst[next].line[j] = src[next].line[j]; //read
2702 +				src[next].line[j] = dst[next].line[j]; //write
2703 +			}
2704 +		}
2705 +		t2 = litmus_clock();
2706 +		local_irq_restore(flags);
2707 +		sum = next + (int)t2;
2708 +		t2 -= t1;
2709 +		ret = put_user(t2, ts);
2710 +		v7_flush_kern_dcache_area(src, size*1024);
2711 +	}
2712 +	//preempt_enable();
2713 +	flush_cache(1);
2714 +
2715 +	return ret;
2716 +}
2717 +#else
2718 +// sequential
2719 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2720 +{
2721 +	/* size is in KB */
2722 +	long ret = 0;
2723 +	lt_t t1, t2;
2724 +	int numlines = size * CACHELINES_IN_1KB;
2725 +	int sum = 0;
2726 +	unsigned long flags;
2727 +	
2728 +	//preempt_disable();
2729 +	if (type == 1) {
2730 +		int i, j;
2731 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2732 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2733 +		
2734 +		local_irq_save(flags);
2735 +		t1 = litmus_clock();
2736 +		for (i = 0; i < numlines; i++) {
2737 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2738 +				//dst[i].line[j] = src[i].line[j]; // read
2739 +				src[i].line[j] = dst[i].line[j]; // write
2740 +			}
2741 +		}
2742 +		t2 = litmus_clock();
2743 +		local_irq_restore(flags);
2744 +		sum = (int)(t1 + t2);
2745 +		t2 -= t1;
2746 +		ret = put_user(t2, ts);
2747 +	}
2748 +	else {
2749 +		int i, j;
2750 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2751 +		local_irq_save(flags);
2752 +		t1 = litmus_clock();
2753 +		for (i = 0; i < numlines; i++) {
2754 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2755 +				//dst[i].line[j] = src[i].line[j]; //read
2756 +				src[i].line[j] = dst[i].line[j]; //write
2757 +			}
2758 +		}
2759 +		t2 = litmus_clock();
2760 +		local_irq_restore(flags);
2761 +		sum = (int)(t1 + t2);
2762 +		t2 -= t1;
2763 +		ret = put_user(t2, ts);
2764 +		v7_flush_kern_dcache_area(src, size*1024);
2765 +	}
2766 +	//preempt_enable();
2767 +	flush_cache(1);
2768 +
2769 +	return ret;
2770 +}
2771 +#endif
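     +/* sys_run_test() times, with interrupts off, a walk over a buffer of
     + * @size KiB: the "#if 1" variant chases a pointer chain presumably
     + * pre-stored by the caller in src[].line[0] to defeat prefetching,
     + * while the "#else" variant walks the lines sequentially.  The sum
     + * variable appears intended only to keep the chain live. */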
2772 +
2773 +asmlinkage long sys_lock_buffer(void* vaddr, size_t size, u32 lock_way, u32 unlock_way)
2774 +{
2775 +	/* size is in bytes */
2776 +	long ret = 0;
2777 +	int i;
2778 +	u32 lock_val, unlock_val;
2779 +	
2780 +	lock_val = ~lock_way & 0x0000ffff;
2781 +	unlock_val = ~unlock_way & 0x0000ffff;
2782 +	color_read_in_mem_lock(lock_val, unlock_val, (void*)vaddr, (void*)vaddr + size);
2783 +	
2784 +	return ret;
2785 +}
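     +/* @lock_way/@unlock_way are "usable ways" bitmasks like the sysctl
     + * entries above; the complements convert them into PL310 lockdown
     + * values before the buffer is prefetched and pinned. */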
2786 +
2787 +#define TRIALS 1000
2788 +
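     +/* Microbenchmark behind /proc/sys/litmus/do_perf_test: over TRIALS
     + * iterations it reports, via printk, the average cost of flushing the
     + * buffer, re-reading it from cache, reading it from memory (with a
     + * flush between reads), and reading it back after dirtying a locked
     + * way. */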
2789 +static int perf_test(void) {
2790 +	struct timespec before, after;
2791 +	struct page *page;
2792 +	void *vaddr;
2793 +	u32 *data;
2794 +	long time, flush_time;
2795 +	int i, num_pages = 1;
2796 +	unsigned int order = 4;
2797 +
2798 +	for (i = 0; i < order; i++) {
2799 +		num_pages = num_pages*2;
2800 +	}
2801 +
2802 +	printk("Number of pages: %d\n", num_pages);
2803 +	//page = alloc_page(__GFP_MOVABLE);
2804 +	page = alloc_pages(__GFP_MOVABLE, order);
2805 +	if (!page) {
2806 +		printk(KERN_WARNING "No memory\n");
2807 +		return -ENOMEM;
2808 +	}
2809 +
2810 +	vaddr = page_address(page);
2811 +	if (!vaddr)
2812 +		printk(KERN_WARNING "%s: vaddr is null\n", __func__);
2813 +	data = (u32*) vaddr;
2814 +
2815 +	getnstimeofday(&before);
2816 +	barrier();
2817 +	for (i = 0; i < TRIALS; i++) {
2818 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2819 +	}
2820 +	barrier();
2821 +	getnstimeofday(&after);
2822 +	time = update_timeval(before, after);
2823 +	printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
2824 +	flush_time = time / TRIALS;
2825 +
2826 +	color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2827 +	
2828 +	barrier();
2829 +	getnstimeofday(&before);
2830 +	barrier();
2831 +	for (i = 0; i < TRIALS; i++) {
2832 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2833 +	}
2834 +	barrier();
2835 +	getnstimeofday(&after);
2836 +	time = update_timeval(before, after);
2837 +	printk("Average for read from cache: %ld\n", time / TRIALS);
2838 +
2839 +	getnstimeofday(&before);
2840 +	barrier();
2841 +	for (i = 0; i < TRIALS; i++) {
2842 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2843 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2844 +	}
2845 +	barrier();
2846 +	getnstimeofday(&after);
2847 +	time = update_timeval(before, after);
2848 +	printk("Average for read from mem: %ld (%ld)\n", time / TRIALS - flush_time, time / TRIALS);
2849 +
2850 +	// write in locked way
2851 +	color_read_in_mem_lock(nr_unlocked_way[2], LOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2852 +	for (i = 0; i < PAGE_SIZE*num_pages/sizeof(u32); i++) {
2853 +		data[i] = i%63353;
2854 +	}
2855 +	// read
2856 +	barrier();
2857 +	getnstimeofday(&before);
2858 +	barrier();
2859 +	for (i = 0; i < TRIALS; i++) {
2860 +		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2861 +	}
2862 +	barrier();
2863 +	getnstimeofday(&after);
2864 +	time = update_timeval(before, after);
2865 +	printk("Average for read in after write: %ld\n", time / TRIALS);
2866 +	
2867 +	
2868 +	//free_page((unsigned long)vaddr);
2869 +	free_pages((unsigned long)vaddr, order);
2870 +
2871 +	return 0;
2872 +}
2873 +
2874 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2875 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2876 +{
2877 +	int ret = 0;
2878 +
2879 +	if (write) {
2880 +		ret = perf_test();
2881 +	}
2882 +
2883 +	return ret;
2884 +}
2885 +
2886 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2887 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2888 +{
2889 +	int ret = -EINVAL;
2890 +
2891 +	if (write && flusher_pages == NULL) {
2892 +		ret = setup_flusher_array();
2893 +		printk(KERN_INFO "setup flusher return: %d\n", ret);
2894 +	
2895 +	}
2896 +	else if (flusher_pages) {
2897 +		printk(KERN_INFO "flusher_pages is already set!\n");
2898 +		ret = 0;
2899 +	}
2900 +	
2901 +	return ret;
2902 +}
2903 +
2904 +static struct ctl_table_header *litmus_sysctls;
2905 +
2906 +static int __init litmus_sysctl_init(void)
2907 +{
2908 +	int ret = 0;
2909 +
2910 +	printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n");
2911 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
2912 +	if (!litmus_sysctls) {
2913 +		printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n");
2914 +		ret = -EFAULT;
2915 +		goto out;
2916 +	}
2917 +
2918 +	way_partition_min = 0x00000000;
2919 +	way_partition_max = 0x0000FFFF;
2920 +	
2921 +out:
2922 +	return ret;
2923 +}
2924 +
2925 +module_init(litmus_sysctl_init);
2926 diff --git litmus/color_shm.c litmus/color_shm.c
2927 new file mode 100644
2928 index 0000000..d4913cd
2929 --- /dev/null
2930 +++ litmus/color_shm.c
2931 @@ -0,0 +1,402 @@
2932 +#include <linux/sched.h>
2933 +#include <linux/mm.h>
2934 +#include <linux/fs.h>
2935 +#include <linux/miscdevice.h>
2936 +#include <linux/spinlock.h>
2937 +#include <linux/module.h>
2938 +#include <linux/highmem.h>
2939 +#include <linux/slab.h>
2940 +#include <linux/mutex.h>
2941 +#include <asm/uaccess.h>
2942 +
2943 +#include <litmus/litmus.h>
2944 +
2945 +#define DEV_NAME	"litmus/color_shm"
2946 +
2947 +/* Major number assigned to our device.
2948 + * See Documentation/devices.txt. */
2949 +#define SHM_MAJOR			240
2950 +#define MAX_COLORED_PAGE	256
2951 +#define NUM_BANKS			8
2952 +#define NUM_COLORS			16
2953 +
2954 +static struct mutex dev_lock;
2955 +static int bypass_cache;
2956 +
2957 +struct color_ioctl_cmd {
2958 +	unsigned int color;
2959 +	unsigned int bank;
2960 +};
2961 +
2962 +struct color_ioctl_offset {
2963 +	unsigned long offset;
2964 +	int lock;
2965 +};
2966 +
2967 +#define SET_COLOR_SHM_CMD		_IOW(SHM_MAJOR, 0x1, struct color_ioctl_cmd)
2968 +#define SET_COLOR_SHM_OFFSET	_IOW(SHM_MAJOR, 0x2, struct color_ioctl_offset)
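     +/* Minimal userspace sketch, assuming the device node is created at
     + * /dev/litmus/color_shm and pages are 4 KiB:
     + *
     + *   int fd = open("/dev/litmus/color_shm", O_RDWR);
     + *   struct color_ioctl_cmd cmd = { .color = 3, .bank = 1 };
     + *   ioctl(fd, SET_COLOR_SHM_CMD, &cmd);
     + *   void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
     + *                  MAP_SHARED, fd, 0);
     + */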
2969 +
2970 +struct color_ioctl_cmd color_param;
2971 +struct color_ioctl_offset color_offset;
2972 +
2973 +static int mmap_common_checks(struct vm_area_struct *vma)
2974 +{
2975 +	/* you can only map the "first" page */
2976 +	if (vma->vm_pgoff != 0)
2977 +		return -EINVAL;
2978 +
2979 +	return 0;
2980 +}
2981 +
2982 +static void mmap_common_vma_flags(struct vm_area_struct *vma)
2983 +{
2984 +	/* The mapping is shared and locked in memory; the commented-out
2985 +	 * variant below also barred forks, expansion, and normal-page treatment. */
2986 +	//vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_SHARED | VM_MAYSHARE;
2987 +	vma->vm_flags |= VM_SHARED | VM_MAYSHARE | VM_LOCKED;
2988 +
2989 +	/* We don't want the first write access to trigger a "minor" page fault
2990 +	 * to mark the page as dirty.  This is transient, private memory, we
2991 +	 * don't care if it was touched or not. __S011 means RW access, but not
2992 +	 * execute, and avoids copy-on-write behavior.
2993 +	 * See protection_map in mmap.c.  */
2994 +	vma->vm_page_prot = PAGE_SHARED;
2995 +}
2996 +
2997 +#define vma_nr_pages(vma) \
2998 +	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
2999 +
3000 +extern struct page* get_colored_page(unsigned long color);
3001 +
3002 +static int do_map_colored_page(struct vm_area_struct *vma,
3003 +		const unsigned long addr,
3004 +		const unsigned long color_no)
3005 +{
3006 +	int err = 0;
3007 +	unsigned long offset = 2048;
3008 +	
3009 +	struct page *page = get_colored_page(color_no);
3010 +
3011 +	if (!page) {
3012 +		printk(KERN_INFO "Could not get page with color %lu.\n",
3013 +				color_no);
3014 +		err = -ENOMEM;
3015 +		goto out;
3016 +	}
3017 +
3018 +	printk(KERN_INFO "vma: %p  addr: 0x%lx  color_no: %lu\n",
3019 +			vma, addr, color_no);
3020 +	
3021 +	printk(KERN_INFO "vm_start: %lu vm_end: %lu\n",
3022 +			vma->vm_start, vma->vm_end);
3023 +
3024 +	printk(KERN_INFO "inserting page (pa: 0x%lx) at vaddr: 0x%lx  "
3025 +			"flags: 0x%lx  prot: 0x%lx\n",
3026 +			page_to_phys(page), addr,
3027 +			vma->vm_flags, pgprot_val(vma->vm_page_prot));
3028 +
3029 +	
3030 +	err = vm_insert_page(vma, addr, page);
3031 +	if (err) {
3032 +		printk(KERN_INFO "vm_insert_page() failed (%d)\n", err);
3033 +		err = -EINVAL;
3034 +		goto out;