Attachment 'litmus-rt-rtas17.patch'

   1 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- a/arch/arm/boot/compressed/Makefile
   4 +++ b/arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
  14 index 0c462a9..5291b70 100644
  15 --- a/arch/arm/include/asm/unistd.h
  16 +++ b/arch/arm/include/asm/unistd.h
  17 @@ -19,7 +19,8 @@
  18   * This may need to be greater than __NR_last_syscall+1 in order to
  19   * account for the padding in the syscall table
  20   */
  21 -#define __NR_syscalls  (388 + NR_litmus_syscalls)
  22 +#define __NR_syscalls  (388 + NR_litmus_syscalls + 0)
  23 +
  24  
  25  /*
  26   * *NOTE*: This is a ghost syscall private to the kernel.  Only the
  27 diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
  28 index f4738a8..55dc863 100644
  29 --- a/arch/arm/kernel/calls.S
  30 +++ b/arch/arm/kernel/calls.S
  31 @@ -409,6 +409,14 @@
  32          CALL(sys_wait_for_ts_release)
  33  		CALL(sys_release_ts)
  34  		CALL(sys_null_call)
  35 +/* 400 */	CALL(sys_get_current_budget)
  36 +		CALL(sys_reservation_create)
  37 +		CALL(sys_reservation_destroy)
  38 +		CALL(sys_set_mc2_task_param)
  39 +		CALL(sys_set_page_color)
  40 +/* 405 */	CALL(sys_test_call)
  41 +		CALL(sys_run_test)
  42 +		CALL(sys_lock_buffer)
  43  
  44  #ifndef syscalls_counted
  45  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
  46 diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
  47 index 350f188..720b45e 100644
  48 --- a/arch/arm/kernel/irq.c
  49 +++ b/arch/arm/kernel/irq.c
  50 @@ -44,6 +44,8 @@
  51  #include <asm/mach/irq.h>
  52  #include <asm/mach/time.h>
  53  
  54 +#include <litmus/cache_proc.h>
  55 +
  56  unsigned long irq_err_count;
  57  
  58  int arch_show_interrupts(struct seq_file *p, int prec)
  59 @@ -66,7 +68,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  60   */
  61  void handle_IRQ(unsigned int irq, struct pt_regs *regs)
  62  {
  63 +	enter_irq_mode();
  64  	__handle_domain_irq(NULL, irq, false, regs);
  65 +	exit_irq_mode();
  66  }
  67  
  68  /*
  69 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
  70 index e309c8f..71c969a 100644
  71 --- a/arch/arm/mm/cache-l2x0.c
  72 +++ b/arch/arm/mm/cache-l2x0.c
  73 @@ -33,6 +33,8 @@
  74  #include "cache-tauros3.h"
  75  #include "cache-aurora-l2.h"
  76  
  77 +#include <litmus/cache_proc.h>
  78 +
  79  struct l2c_init_data {
  80  	const char *type;
  81  	unsigned way_size_0;
  82 @@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  83  
  84  	if (n) {
  85  		unsigned i;
  86 -
  87  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  88  		for (i = 0; i < n; i++)
  89  			pr_cont(" %s", errata[i]);
  90 @@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
  91  	},
  92  };
  93  
  94 +void l2c310_flush_all(void)
  95 +{
  96 +	l2c210_flush_all();
  97 +}
  98 +
  99  static int __init __l2c_init(const struct l2c_init_data *data,
 100  			     u32 aux_val, u32 aux_mask, u32 cache_id)
 101  {
 102 @@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 103  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
 104  		data->type, cache_id, aux);
 105  
 106 +	litmus_setup_lockdown(l2x0_base, cache_id);
 107 +	
 108  	return 0;
 109  }
 110  
 111 diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
 112 index 34680a5..b303a9b 100644
 113 --- a/arch/x86/syscalls/syscall_32.tbl
 114 +++ b/arch/x86/syscalls/syscall_32.tbl
 115 @@ -377,3 +377,11 @@
 116  368	i386	wait_for_ts_release	sys_wait_for_ts_release
 117  369	i386	release_ts		sys_release_ts
 118  370	i386	null_call		sys_null_call
 119 +371	i386	get_current_budget	sys_get_current_budget
 120 +372	i386	reservation_create	sys_reservation_create
 121 +373	i386	reservation_destroy	sys_reservation_destroy
 122 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
 123 +375	i386	set_page_color		sys_set_page_color
 124 +376	i386	test_call		sys_test_call
 125 +377	i386	run_test		sys_run_test
 126 +378	i386	lock_buffer		sys_lock_buffer
 127 diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
 128 index cbd1b6b..5f24a80 100644
 129 --- a/arch/x86/syscalls/syscall_64.tbl
 130 +++ b/arch/x86/syscalls/syscall_64.tbl
 131 @@ -342,6 +342,14 @@
 132  360	common	wait_for_ts_release	sys_wait_for_ts_release
 133  361	common	release_ts		sys_release_ts
 134  362	common	null_call		sys_null_call
 135 +363	common	get_current_budget	sys_get_current_budget
 136 +364	common	reservation_create	sys_reservation_create
 137 +365	common	reservation_destroy	sys_reservation_destroy
 138 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 139 +367	common	set_page_color		sys_set_page_color
 140 +368	common	test_call		sys_test_call
 141 +369	common	run_test		sys_run_test
 142 +370	common	lock_buffer		sys_lock_buffer
 143  
 144  #
 145  # x32-specific system call numbers start at 512 to avoid cache impact
 146 diff --git a/include/linux/migrate.h b/include/linux/migrate.h
 147 index cac1c09..b16047b 100644
 148 --- a/include/linux/migrate.h
 149 +++ b/include/linux/migrate.h
 150 @@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
 151  			struct page *, struct page *, enum migrate_mode);
 152  extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 153  		unsigned long private, enum migrate_mode mode, int reason);
 154 +extern int replicate_pages(struct list_head *l, new_page_t new, free_page_t free,
 155 +		unsigned long private, enum migrate_mode mode, int reason);
 156  
 157  extern int migrate_prep(void);
 158  extern int migrate_prep_local(void);
 159 @@ -50,7 +52,11 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 160  		free_page_t free, unsigned long private, enum migrate_mode mode,
 161  		int reason)
 162  	{ return -ENOSYS; }
 163 -
 164 +static inline int replicate_pages(struct list_head *l, new_page_t new,
 165 +		free_page_t free, unsigned long private, enum migrate_mode mode,
 166 +		int reason)
 167 +	{ return -ENOSYS; }
 168 +	
 169  static inline int migrate_prep(void) { return -ENOSYS; }
 170  static inline int migrate_prep_local(void) { return -ENOSYS; }
 171  
 172 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
 173 index c89c53a..7c90e02 100644
 174 --- a/include/linux/rmap.h
 175 +++ b/include/linux/rmap.h
 176 @@ -188,7 +188,8 @@ int page_referenced(struct page *, int is_locked,
 177  #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 178  
 179  int try_to_unmap(struct page *, enum ttu_flags flags);
 180 -
 181 +int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
 182 +		     unsigned long address, void *arg);
 183  /*
 184   * Used by uprobes to replace a userspace page safely
 185   */
 186 diff --git a/include/litmus/budget.h b/include/litmus/budget.h
 187 index bd2d5c9..60eb814 100644
 188 --- a/include/litmus/budget.h
 189 +++ b/include/litmus/budget.h
 190 @@ -33,4 +33,6 @@ static inline int requeue_preempted_job(struct task_struct* t)
 191  		(!budget_exhausted(t) || !budget_enforced(t));
 192  }
 193  
 194 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining);
 195 +
 196  #endif
 197 diff --git a/include/litmus/cache_proc.h b/include/litmus/cache_proc.h
 198 new file mode 100644
 199 index 0000000..e9440de
 200 --- /dev/null
 201 +++ b/include/litmus/cache_proc.h
 202 @@ -0,0 +1,17 @@
 203 +#ifndef LITMUS_CACHE_PROC_H
 204 +#define LITMUS_CACHE_PROC_H
 205 +
 206 +#ifdef __KERNEL__
 207 +
 208 +void litmus_setup_lockdown(void __iomem*, u32);
 209 +void enter_irq_mode(void);
 210 +void exit_irq_mode(void);
 211 +void flush_cache(int all);
 212 +void lock_cache(int cpu, u32 val);
 213 +
 214 +extern struct page *new_alloc_page_color(unsigned long color);
 215 +
 216 +#endif
 217 +
 218 +#endif
 219 +
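
The lockdown interface declared above is consumed elsewhere in this patch: handle_IRQ() brackets interrupt handling with enter_irq_mode()/exit_irq_mode(), and __l2c_init() calls litmus_setup_lockdown(). As a rough illustration of lock_cache(), here is a minimal kernel-side sketch that gives each of four CPUs its own four L2 ways, reusing the way_partition[] masks defined later in litmus/cache_proc.c (a 1 bit forbids allocation in the corresponding way):

    /* Sketch only: assign each CPU a private set of four L2 ways.
     * Masks are copied from way_partition[] in litmus/cache_proc.c. */
    static const u32 cpu_way_mask[4] = {
        0xfffffff0, /* CPU 0 -> ways 0-3   */
        0xffffff0f, /* CPU 1 -> ways 4-7   */
        0xfffff0ff, /* CPU 2 -> ways 8-11  */
        0xffff0fff, /* CPU 3 -> ways 12-15 */
    };

    static void apply_way_partition(void)
    {
        int cpu;

        for_each_online_cpu(cpu)
            lock_cache(cpu, cpu_way_mask[cpu]);
    }
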
 220 diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
 221 index a6eb534..441210c 100644
 222 --- a/include/litmus/litmus.h
 223 +++ b/include/litmus/litmus.h
 224 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 225  	((current)->state == TASK_RUNNING || 	\
 226  	 preempt_count() & PREEMPT_ACTIVE)
 227  
 228 +#define is_running(t) 			\
 229 +	((t)->state == TASK_RUNNING || 	\
 230 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 231 +
 232 +#define is_blocked(t)       \
 233 +	(!is_running(t))
 234 +
 235  #define is_released(t, now)	\
 236  	(lt_before_eq(get_release(t), now))
 237  #define is_tardy(t, now)    \
 238 diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
 239 new file mode 100644
 240 index 0000000..e3c0af2
 241 --- /dev/null
 242 +++ b/include/litmus/mc2_common.h
 243 @@ -0,0 +1,31 @@
 244 +/*
 245 + * MC^2 common data structures
 246 + */
 247 + 
 248 +#ifndef __UNC_MC2_COMMON_H__
 249 +#define __UNC_MC2_COMMON_H__
 250 +
 251 +enum crit_level {
 252 +	CRIT_LEVEL_A = 0,
 253 +	CRIT_LEVEL_B = 1,
 254 +	CRIT_LEVEL_C = 2,
 255 +	NUM_CRIT_LEVELS = 3,
 256 +};
 257 +
 258 +struct mc2_task {
 259 +	enum crit_level crit;
 260 +	unsigned int res_id;
 261 +};
 262 +
 263 +#ifdef __KERNEL__
 264 +
 265 +#include <litmus/reservation.h>
 266 +
 267 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
 268 +
 269 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 270 +							struct reservation *res);
 271 +	
 272 +#endif /* __KERNEL__ */
 273 +
 274 +#endif
 275 \ No newline at end of file
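
A task opts into MC^2 scheduling by handing the kernel one of the mc2_task descriptors defined above; the set_mc2_task_param syscall added by this patch carries it in. A minimal userspace sketch, assuming a liblitmus-style wrapper that takes a pid and a pointer to struct mc2_task (the implementation lives in litmus/sched_mc2.c, which is not shown in this excerpt):

    /* Sketch: declare the calling task a criticality-level-B MC^2 task
     * attached to reservation 42. The wrapper name and its (pid, param)
     * signature are assumptions. */
    struct mc2_task param = {
        .crit   = CRIT_LEVEL_B,
        .res_id = 42,
    };

    if (set_mc2_task_param(getpid(), &param) < 0)
        perror("set_mc2_task_param");
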
 276 diff --git a/include/litmus/polling_reservations.h b/include/litmus/polling_reservations.h
 277 new file mode 100644
 278 index 0000000..66c9b1e
 279 --- /dev/null
 280 +++ b/include/litmus/polling_reservations.h
 281 @@ -0,0 +1,36 @@
 282 +#ifndef LITMUS_POLLING_RESERVATIONS_H
 283 +#define LITMUS_POLLING_RESERVATIONS_H
 284 +
 285 +#include <litmus/reservation.h>
 286 +
 287 +struct polling_reservation {
 288 +	/* extend basic reservation */
 289 +	struct reservation res;
 290 +
 291 +	lt_t max_budget;
 292 +	lt_t period;
 293 +	lt_t deadline;
 294 +	lt_t offset;
 295 +};
 296 +
 297 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
 298 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
 299 +
 300 +struct table_driven_reservation {
 301 +	/* extend basic reservation */
 302 +	struct reservation res;
 303 +
 304 +	lt_t major_cycle;
 305 +	unsigned int next_interval;
 306 +	unsigned int num_intervals;
 307 +	struct lt_interval *intervals;
 308 +
 309 +	/* info about current scheduling slot */
 310 +	struct lt_interval cur_interval;
 311 +	lt_t major_cycle_start;
 312 +};
 313 +
 314 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
 315 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
 316 +
 317 +#endif
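
polling_reservation_init() covers both polling variants: use_periodic_polling selects periodic vs. sporadic replenishment, and use_edf_prio selects EDF vs. fixed priority. A kernel-side sketch under the signature above, with illustrative parameters (10 ms budget, 100 ms period, implicit deadline, no offset):

    /* Sketch: allocate and initialize a periodic, EDF-prioritized
     * polling reservation. All numeric parameters are examples. */
    struct polling_reservation *pres;

    pres = kzalloc(sizeof(*pres), GFP_KERNEL);
    if (pres)
        polling_reservation_init(pres, 1 /* EDF */, 1 /* periodic */,
                                 10 * NSEC_PER_MSEC,  /* budget   */
                                 100 * NSEC_PER_MSEC, /* period   */
                                 100 * NSEC_PER_MSEC, /* deadline */
                                 0 /* offset */);
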
 318 diff --git a/include/litmus/replicate_lib.h b/include/litmus/replicate_lib.h
 319 new file mode 100644
 320 index 0000000..186837b
 321 --- /dev/null
 322 +++ b/include/litmus/replicate_lib.h
 323 @@ -0,0 +1,19 @@
 324 +#ifndef LITMUS_REPLICATE_LIB_H
 325 +#define LITMUS_REPLICATE_LIB_H
 326 +
 327 +#include <linux/list.h>
 328 +#include <linux/mm_types.h>
 329 +#include <linux/mm_inline.h>
 330 +
 331 +/* Data structure for the "master" list */
 332 +struct shared_lib_page {
 333 +	struct page *master_page;
 334 +	struct page *r_page[NR_CPUS+1];
 335 +	unsigned long int master_pfn;
 336 +	unsigned long int r_pfn[NR_CPUS+1];
 337 +	struct list_head list;
 338 +};
 339 +
 340 +extern struct list_head shared_lib_pages;
 341 +
 342 +#endif
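
Each entry on the global shared_lib_pages list records a master page frame and up to NR_CPUS+1 per-CPU replicas of it. A typical lookup walks the list by master PFN, for example:

    /* Sketch: find the descriptor for a given master PFN. Serialization
     * against concurrent list updates is left to the caller here. */
    static struct shared_lib_page *find_shared_lib_page(unsigned long pfn)
    {
        struct shared_lib_page *lib_page;

        list_for_each_entry(lib_page, &shared_lib_pages, list)
            if (lib_page->master_pfn == pfn)
                return lib_page;
        return NULL;
    }
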
 343 diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
 344 new file mode 100644
 345 index 0000000..7e022b3
 346 --- /dev/null
 347 +++ b/include/litmus/reservation.h
 348 @@ -0,0 +1,256 @@
 349 +#ifndef LITMUS_RESERVATION_H
 350 +#define LITMUS_RESERVATION_H
 351 +
 352 +#include <linux/list.h>
 353 +#include <linux/hrtimer.h>
 354 +
 355 +struct reservation_client;
 356 +struct reservation_environment;
 357 +struct reservation;
 358 +
 359 +typedef enum {
 360 +	/* reservation has no clients, is not consuming budget */
 361 +	RESERVATION_INACTIVE = 0,
 362 +
 363 +	/* reservation has clients, consumes budget when scheduled */
 364 +	RESERVATION_ACTIVE,
 365 +
 366 +	/* reservation has no clients, but may be consuming budget */
 367 +	RESERVATION_ACTIVE_IDLE,
 368 +
 369 +	/* Reservation has no budget and waits for
 370 +	 * replenishment. May or may not have clients. */
 371 +	RESERVATION_DEPLETED,
 372 +} reservation_state_t;
 373 +
 374 +
 375 +/* ************************************************************************** */
 376 +
 377 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
 378 + * to schedule right now and background work can be scheduled. */
 379 +typedef struct task_struct * (*dispatch_t)  (
 380 +	struct reservation_client *client
 381 +);
 382 +
 383 +/* Something that can be managed in a reservation and that can yield
 384 + * a process for dispatching. Contains a pointer to the reservation
 385 + * to which it "belongs". */
 386 +struct reservation_client {
 387 +	struct list_head list;
 388 +	struct reservation* reservation;
 389 +	dispatch_t dispatch;
 390 +};
 391 +
 392 +
 393 +/* ************************************************************************** */
 394 +
 395 +/* Called by reservations to request state change. */
 396 +typedef void (*reservation_change_state_t)  (
 397 +	struct reservation_environment* env,
 398 +	struct reservation *res,
 399 +	reservation_state_t new_state
 400 +);
 401 +
 402 +/* The framework within which reservations operate. */
 403 +struct reservation_environment {
 404 +	lt_t time_zero;
 405 +	lt_t current_time;
 406 +
 407 +	/* services invoked by reservations */
 408 +	reservation_change_state_t change_state;
 409 +};
 410 +
 411 +
 412 +/* ************************************************************************** */
 413 +
 414 +/* A new client is added or an existing client resumes. */
 415 +typedef void (*client_arrives_t)  (
 416 +	struct reservation *reservation,
 417 +	struct reservation_client *client
 418 +);
 419 +
 420 +/* A client suspends or terminates. */
 421 +typedef void (*client_departs_t)  (
 422 +	struct reservation *reservation,
 423 +	struct reservation_client *client,
 424 +	int did_signal_job_completion
 425 +);
 426 +
 427 +/* A previously requested replenishment has occurred. */
 428 +typedef void (*on_replenishment_timer_t)  (
 429 +	struct reservation *reservation
 430 +);
 431 +
 432 +/* Update the reservation's budget to reflect execution or idling. */
 433 +typedef void (*drain_budget_t) (
 434 +	struct reservation *reservation,
 435 +	lt_t how_much
 436 +);
 437 +
 438 +/* Select a ready task from one of the clients for scheduling. */
 439 +typedef struct task_struct* (*dispatch_client_t)  (
 440 +	struct reservation *reservation,
 441 +	lt_t *time_slice /* May be used to force rescheduling after
 442 +	                    some amount of time. 0 => no limit */
 443 +);
 444 +
 445 +
 446 +struct reservation_ops {
 447 +	dispatch_client_t dispatch_client;
 448 +
 449 +	client_arrives_t client_arrives;
 450 +	client_departs_t client_departs;
 451 +
 452 +	on_replenishment_timer_t replenish;
 453 +	drain_budget_t drain_budget;
 454 +};
 455 +
 456 +struct reservation {
 457 +	/* used to queue in environment */
 458 +	struct list_head list;
 459 +
 460 +	reservation_state_t state;
 461 +	unsigned int id;
 462 +
 463 +	/* exact meaning defined by impl. */
 464 +	lt_t priority;
 465 +	lt_t cur_budget;
 466 +	lt_t next_replenishment;
 467 +
 468 +	/* budget stats */
 469 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
 470 +	lt_t budget_consumed_total;
 471 +
 472 +	/* interaction with framework */
 473 +	struct reservation_environment *env;
 474 +	struct reservation_ops *ops;
 475 +
 476 +	struct list_head clients;
 477 +	
 478 +	/* for global env. */
 479 +	int scheduled_on;
 480 +	int event_added;
 481 +	/* set while blocked by a ghost job; do not charge budget when ACTIVE */
 482 +	int blocked_by_ghost;
 483 +	/* set if this is a ghost job; if clear, do not charge budget when ACTIVE_IDLE */
 484 +	int is_ghost;
 485 +};
 486 +
 487 +void reservation_init(struct reservation *res);
 488 +
 489 +/* Default implementations */
 490 +
 491 +/* simply select the first client in the list, set *for_at_most to zero */
 492 +struct task_struct* default_dispatch_client(
 493 +	struct reservation *res,
 494 +	lt_t *for_at_most
 495 +);
 496 +
 497 +/* "connector" reservation client to hook up tasks with reservations */
 498 +struct task_client {
 499 +	struct reservation_client client;
 500 +	struct task_struct *task;
 501 +};
 502 +
 503 +void task_client_init(struct task_client *tc, struct task_struct *task,
 504 +	struct reservation *reservation);
 505 +
 506 +#define SUP_RESCHEDULE_NOW (0)
 507 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
 508 +
 509 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
 510 + * environment.
 511 + */
 512 +struct sup_reservation_environment {
 513 +	struct reservation_environment env;
 514 +
 515 +	/* ordered by priority */
 516 +	struct list_head active_reservations;
 517 +
 518 +	/* ordered by next_replenishment */
 519 +	struct list_head depleted_reservations;
 520 +
 521 +	/* unordered */
 522 +	struct list_head inactive_reservations;
 523 +
 524 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
 525 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
 526 +	 * any other value means program a timer for the given time
 527 +	 */
 528 +	lt_t next_scheduler_update;
 529 +	/* set to true if a call to sup_dispatch() is imminent */
 530 +	bool will_schedule;
 531 +};
 532 +
 533 +/* Contract:
 534 + *  - before calling into sup_ code, or any reservation methods,
 535 + *    update the time with sup_update_time(); and
 536 + *  - after calling into sup_ code, or any reservation methods,
 537 + *    check next_scheduler_update and program timer or trigger
 538 + *    scheduler invocation accordingly.
 539 + */
 540 +
 541 +void sup_init(struct sup_reservation_environment* sup_env);
 542 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
 543 +	struct reservation* new_res);
 544 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
 545 +	lt_t timeout);
 546 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
 547 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 548 +
 549 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 550 +	unsigned int id);
 551 +	
 552 +/* A global multiprocessor reservation environment. */
 553 +
 554 +typedef enum {
 555 +	EVENT_REPLENISH = 0,
 556 +	EVENT_DRAIN,
 557 +	EVENT_OTHERS,
 558 +} event_type_t;
 559 +
 560 +
 561 +struct next_timer_event {
 562 +	lt_t next_update;
 563 +	int timer_armed_on;
 564 +	unsigned int id;
 565 +	event_type_t type;
 566 +	struct list_head list;
 567 +};
 568 +
 569 +struct gmp_reservation_environment {
 570 +	raw_spinlock_t lock;
 571 +	struct reservation_environment env;
 572 +
 573 +	/* ordered by priority */
 574 +	struct list_head active_reservations;
 575 +
 576 +	/* ordered by next_replenishment */
 577 +	struct list_head depleted_reservations;
 578 +
 579 +	/* unordered */
 580 +	struct list_head inactive_reservations;
 581 +
 582 +	/* timer event ordered by next_update */
 583 +	struct list_head next_events;
 584 +	
 585 +	/* (schedule_now == true) means call gmp_dispatch() now */
 586 +	int schedule_now;
 587 +	/* set to true if a call to gmp_dispatch() is imminent */
 588 +	bool will_schedule;
 589 +};
 590 +
 591 +void gmp_init(struct gmp_reservation_environment* gmp_env);
 592 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
 593 +	struct reservation* new_res);
 594 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
 595 +	lt_t timeout, unsigned int id, event_type_t type);
 596 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
 597 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
 598 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
 599 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
 600 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
 601 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
 602 +	unsigned int id);
 603 +
 604 +#endif
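
The contract stated in the header above translates into a fixed call pattern in a plugin's scheduling path: update the environment's notion of time, dispatch, then act on next_scheduler_update. A sketch (only the sup_* API and litmus_clock() come from this patch; the timer handling is elided):

    /* Sketch of the sup_ contract: (1) advance time, (2) dispatch,
     * (3) honor next_scheduler_update. */
    static struct task_struct *pick_next(struct sup_reservation_environment *sup_env)
    {
        struct task_struct *tsk;

        sup_update_time(sup_env, litmus_clock());
        tsk = sup_dispatch(sup_env);

        if (sup_env->next_scheduler_update != SUP_NO_SCHEDULER_UPDATE &&
            sup_env->next_scheduler_update != SUP_RESCHEDULE_NOW) {
            /* arm a per-CPU hrtimer for sup_env->next_scheduler_update */
        }
        return tsk;
    }
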
 605 diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
 606 index 7b9a909..56de045 100644
 607 --- a/include/litmus/rt_param.h
 608 +++ b/include/litmus/rt_param.h
 609 @@ -51,6 +51,16 @@ typedef enum {
 610  	TASK_EARLY
 611  } release_policy_t;
 612  
 613 +#ifdef CONFIG_PGMRT_SUPPORT
 614 +typedef enum {
 615 +    PGM_NOT_A_NODE,
 616 +    PGM_SRC,
 617 +    PGM_SINK,
 618 +    PGM_SRC_SINK,
 619 +    PGM_INTERNAL
 620 +} pgm_node_type_t;
 621 +#endif
 622 +
 623  /* We use the common priority interpretation "lower index == higher priority",
 624   * which is commonly used in fixed-priority schedulability analysis papers.
 625   * So, a numerically lower priority value implies higher scheduling priority,
 626 @@ -62,6 +72,7 @@ typedef enum {
 627  #define LITMUS_MAX_PRIORITY     512
 628  #define LITMUS_HIGHEST_PRIORITY   1
 629  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
 630 +#define LITMUS_NO_PRIORITY		UINT_MAX
 631  
 632  /* Provide generic comparison macros for userspace,
 633   * in case that we change this later. */
 634 @@ -71,6 +82,46 @@ typedef enum {
 635  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
 636  	 (p) <= LITMUS_LOWEST_PRIORITY)
 637  
 638 +/* reservation support */
 639 +
 640 +typedef enum {
 641 +	PERIODIC_POLLING,
 642 +	SPORADIC_POLLING,
 643 +	TABLE_DRIVEN,
 644 +} reservation_type_t;
 645 +
 646 +struct lt_interval {
 647 +	lt_t start;
 648 +	lt_t end;
 649 +};
 650 +
 651 +#ifndef __KERNEL__
 652 +#define __user
 653 +#endif
 654 +
 655 +struct reservation_config {
 656 +	unsigned int id;
 657 +	lt_t priority;
 658 +	int  cpu;
 659 +
 660 +	union {
 661 +		struct {
 662 +			lt_t period;
 663 +			lt_t budget;
 664 +			lt_t relative_deadline;
 665 +			lt_t offset;
 666 +		} polling_params;
 667 +
 668 +		struct {
 669 +			lt_t major_cycle_length;
 670 +			unsigned int num_intervals;
 671 +			struct lt_interval __user *intervals;
 672 +		} table_driven_params;
 673 +	};
 674 +};
 675 +
 676 +/* regular sporadic task support */
 677 +
 678  struct rt_task {
 679  	lt_t 		exec_cost;
 680  	lt_t 		period;
 681 @@ -81,6 +132,10 @@ struct rt_task {
 682  	task_class_t	cls;
 683  	budget_policy_t  budget_policy;  /* ignored by pfair */
 684  	release_policy_t release_policy;
 685 +#ifdef CONFIG_PGMRT_SUPPORT
 686 +	pgm_node_type_t	pgm_type;
 687 +	lt_t			pgm_expected_etoe;
 688 +#endif
 689  };
 690  
 691  union np_flag {
 692 @@ -121,6 +176,13 @@ struct control_page {
 693  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
 694  				     * started. */
 695  
 696 +#ifdef CONFIG_PGMRT_SUPPORT
 697 +    /* Flags from userspace signifying PGM wait states. */
 698 +    volatile uint32_t   pgm_waiting;    /* waiting for tokens */
 699 +    volatile uint32_t   pgm_sending;    /* sending tokens */
 700 +    volatile uint32_t   pgm_satisfied;  /* done waiting/sending */
 701 +#endif
 702 +
 703  	/* to be extended */
 704  };
 705  
 706 @@ -165,6 +227,7 @@ struct rt_job {
 707  };
 708  
 709  struct pfair_param;
 710 +struct mc2_task;
 711  
 712  /*	RT task parameters for scheduling extensions
 713   *	These parameters are inherited during clone and therefore must
 714 @@ -246,7 +309,10 @@ struct rt_param {
 715  	volatile int		linked_on;
 716  
 717  	/* PFAIR/PD^2 state. Allocated on demand. */
 718 -	struct pfair_param*	pfair;
 719 +	union {
 720 +		void *plugin_state;
 721 +		struct pfair_param *pfair;
 722 +	};
 723  
 724  	/* Fields saved before BE->RT transition.
 725  	 */
 726 @@ -275,6 +341,10 @@ struct rt_param {
 727  
 728  	/* Pointer to the page shared between userspace and kernel. */
 729  	struct control_page * ctrl_page;
 730 +
 731 +	/* Mixed-criticality specific data */
 732 +	struct mc2_task* mc2_data;
 733 +	unsigned long addr_ctrl_page;
 734  };
 735  
 736  #endif
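
struct reservation_config defined above is the userspace side of the reservation_create syscall; the union carries either polling or table-driven parameters, depending on the requested reservation_type. A userspace sketch for a sporadic polling reservation (times in nanoseconds; a liblitmus-style reservation_create() wrapper is assumed):

    /* Sketch: a sporadic polling reservation on CPU 1 with a 2 ms budget
     * every 50 ms and an implicit deadline. */
    struct reservation_config config = {
        .id       = 42,
        .priority = LITMUS_NO_PRIORITY, /* exact meaning is plugin-defined */
        .cpu      = 1,
        .polling_params = {
            .budget            =  2000000ULL,
            .period            = 50000000ULL,
            .relative_deadline = 50000000ULL,
            .offset            = 0,
        },
    };

    if (reservation_create(SPORADIC_POLLING, &config) < 0)
        perror("reservation_create");
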
 737 diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
 738 index 0ccccd6..4c8aaa6 100644
 739 --- a/include/litmus/sched_plugin.h
 740 +++ b/include/litmus/sched_plugin.h
 741 @@ -77,6 +77,17 @@ typedef long (*wait_for_release_at_t)(lt_t release_time);
 742  /* Informs the plugin when a synchronous release takes place. */
 743  typedef void (*synchronous_release_at_t)(lt_t time_zero);
 744  
 745 +/* How much budget has the current task consumed so far, and how much
 746 + * has it left? The default implementation ties into the per-task
 747 + * budget enforcement code. Plugins can override this to report
 748 + * reservation-specific values. */
 749 +typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
 750 +
 751 +/* Reservation creation/removal backends. The meaning of reservation_type
 752 + * and reservation_id is entirely plugin-specific. */
 753 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
 754 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
 755 +
 756  /************************ misc routines ***********************/
 757  
 758  
 759 @@ -109,6 +120,12 @@ struct sched_plugin {
 760  	task_exit_t 		task_exit;
 761  	task_cleanup_t		task_cleanup;
 762  
 763 +	current_budget_t	current_budget;
 764 +
 765 +	/* Reservation support */
 766 +	reservation_create_t	reservation_create;
 767 +	reservation_destroy_t	reservation_destroy;
 768 +
 769  #ifdef CONFIG_LITMUS_LOCKING
 770  	/*	locking protocols	*/
 771  	allocate_lock_t		allocate_lock;
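
A plugin opts into the new hooks simply by filling in the three new fields of struct sched_plugin; plugins that leave them NULL are unaffected. A sketch of how the MC^2 plugin added later in this patch might wire them up (the mc2_* callback names are illustrative; litmus_current_budget() is the default from litmus/budget.c below):

    static struct sched_plugin mc2_plugin = {
        .plugin_name         = "MC2",
        /* ... the usual scheduling callbacks ... */
        .current_budget      = litmus_current_budget,
        .reservation_create  = mc2_reservation_create,
        .reservation_destroy = mc2_reservation_destroy,
    };
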
 772 diff --git a/include/litmus/trace.h b/include/litmus/trace.h
 773 index 6017872..24ca412 100644
 774 --- a/include/litmus/trace.h
 775 +++ b/include/litmus/trace.h
 776 @@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 777  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
 778  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
 779  
 780 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
 781 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
 782 +
 783  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
 784  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
 785  
 786 @@ -137,6 +140,17 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 787  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
 788  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
 789  
 790 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
 791 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
 792 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
 793 +
 794 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
 795 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
 796 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
 797 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
 798 +
 799 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
 800 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
 801 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
 802 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
 803  
 804  #endif /* !_SYS_TRACE_H_ */
 805 diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
 806 index 94264c2..86bbbb8d 100644
 807 --- a/include/litmus/unistd_32.h
 808 +++ b/include/litmus/unistd_32.h
 809 @@ -17,5 +17,13 @@
 810  #define __NR_wait_for_ts_release __LSC(9)
 811  #define __NR_release_ts		__LSC(10)
 812  #define __NR_null_call		__LSC(11)
 813 +#define __NR_get_current_budget __LSC(12)
 814 +#define __NR_reservation_create	__LSC(13)
 815 +#define __NR_reservation_destroy	__LSC(14)
 816 +#define __NR_set_mc2_task_param	__LSC(15)
 817 +#define __NR_set_page_color		__LSC(16)
 818 +#define __NR_test_call		__LSC(17)
 819 +#define __NR_run_test		__LSC(18)
 820 +#define __NR_lock_buffer	__LSC(19)
 821  
 822 -#define NR_litmus_syscalls 12
 823 +#define NR_litmus_syscalls	20
 824 diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
 825 index d5ced0d..4b96e7c 100644
 826 --- a/include/litmus/unistd_64.h
 827 +++ b/include/litmus/unistd_64.h
 828 @@ -29,5 +29,22 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 829  __SYSCALL(__NR_release_ts, sys_release_ts)
 830  #define __NR_null_call				__LSC(11)
 831  __SYSCALL(__NR_null_call, sys_null_call)
 832 +#define __NR_get_current_budget			__LSC(12)
 833 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
 834 +#define __NR_reservation_create		__LSC(13)
 835 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
 836 +#define __NR_reservation_destroy	__LSC(14)
 837 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
 838 +#define __NR_set_mc2_task_param		__LSC(15)
 839 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
 840 +#define __NR_set_page_color			__LSC(16)
 841 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
 842 +#define __NR_test_call				__LSC(17)
 843 +__SYSCALL(__NR_test_call, sys_test_call)
 844 +#define __NR_run_test				__LSC(18)
 845 +__SYSCALL(__NR_run_test, sys_run_test)
 846 +#define __NR_lock_buffer			__LSC(19)
 847 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
 848  
 849 -#define NR_litmus_syscalls 12
 850 +
 851 +#define NR_litmus_syscalls 20
 852 diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
 853 index 9d58690..cd36358 100644
 854 --- a/kernel/sched/litmus.c
 855 +++ b/kernel/sched/litmus.c
 856 @@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
 857  	/* task counter */
 858  	p->se.sum_exec_runtime += delta;
 859  	if (delta) {
 860 -		TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
 861 -			delta, p->rt_param.job_params.exec_time, budget_remaining(p));
 862 +		//TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
 863 +			//delta, p->rt_param.job_params.exec_time, budget_remaining(p));
 864 +		;
 865  	}
 866  	/* sched_clock() */
 867  	p->se.exec_start = rq->clock;
 868 diff --git a/litmus/Makefile b/litmus/Makefile
 869 index 7970cd5..7e4711c 100644
 870 --- a/litmus/Makefile
 871 +++ b/litmus/Makefile
 872 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
 873  	    sync.o \
 874  	    rt_domain.o \
 875  	    edf_common.o \
 876 +		mc2_common.o \
 877  	    fp_common.o \
 878  	    fdso.o \
 879  	    locking.o \
 880 @@ -19,13 +20,19 @@ obj-y     = sched_plugin.o litmus.o \
 881  	    binheap.o \
 882  	    ctrldev.o \
 883  	    uncachedev.o \
 884 +		reservation.o \
 885 +		polling_reservations.o \
 886  	    sched_gsn_edf.o \
 887  	    sched_psn_edf.o \
 888 -	    sched_pfp.o
 889 +	    sched_pfp.o \
 890 +		sched_mc2.o \
 891 +		bank_proc.o \
 892 +	    color_shm.o \
 893 +		replicate_lib.o \
 894 +		cache_proc.o
 895  
 896  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 897  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 898 -
 899  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 900  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 901  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
 902 diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
 903 new file mode 100644
 904 index 0000000..2284f4f
 905 --- /dev/null
 906 +++ b/litmus/bank_proc.c
 907 @@ -0,0 +1,741 @@
 908 +/*
  909 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
  910 + *                This file keeps a pool of colored pages; users can request pages
  911 + *                with a specific color or bank number.
  912 + *                Part of the code is adapted from Jonathan Herman's code.
 913 + */
 914 +#include <linux/init.h>
 915 +#include <linux/types.h>
 916 +#include <linux/kernel.h>
 917 +#include <linux/module.h>
 918 +#include <linux/sysctl.h>
 919 +#include <linux/slab.h>
 920 +#include <linux/io.h>
 921 +#include <linux/mutex.h>
 922 +#include <linux/mm.h>
 923 +#include <linux/random.h>
 924 +
 925 +#include <litmus/litmus_proc.h>
 926 +#include <litmus/sched_trace.h>
 927 +#include <litmus/litmus.h>
 928 +
 929 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
 930 +
 931 +// This Address Decoding is used in imx6-sabredsd platform
 932 +#define BANK_MASK  0x38000000     
 933 +#define BANK_SHIFT  27
 934 +
 935 +#define CACHE_MASK  0x0000f000      
 936 +#define CACHE_SHIFT 12
 937 +
 938 +#define PAGES_PER_COLOR 2000
 939 +#define PAGES_PER_COLOR_HALF 1000
 940 +unsigned int NUM_PAGE_LIST;  //8*16
 941 +
 942 +unsigned int number_banks;
 943 +unsigned int number_cachecolors;
 944 +
 945 +unsigned int set_partition_max = 0x0000ffff;
 946 +unsigned int set_partition_min = 0;
 947 +unsigned int bank_partition_max = 0x000000ff;
 948 +unsigned int bank_partition_min = 0;
 949 +
 950 +int show_page_pool = 0;
 951 +int refill_page_pool = 0;
 952 +spinlock_t reclaim_lock;
 953 +
 954 +unsigned int set_partition[9] = {
 955 +        0x00000003,  /* Core 0, and Level A*/
 956 +        0x00000003,  /* Core 0, and Level B*/
 957 +        0x0000000C,  /* Core 1, and Level A*/
 958 +        0x0000000C,  /* Core 1, and Level B*/
 959 +        0x00000030,  /* Core 2, and Level A*/
 960 +        0x00000030,  /* Core 2, and Level B*/
 961 +        0x000000C0,  /* Core 3, and Level A*/
 962 +        0x000000C0,  /* Core 3, and Level B*/
 963 +        0x0000ff00,  /* Level C */
 964 +};
 965 +
 966 +unsigned int bank_partition[9] = {
 967 +        0x00000010,  /* Core 0, and Level A*/
 968 +        0x00000010,  /* Core 0, and Level B*/
 969 +        0x00000020,  /* Core 1, and Level A*/
 970 +        0x00000020,  /* Core 1, and Level B*/
 971 +        0x00000040,  /* Core 2, and Level A*/
 972 +        0x00000040,  /* Core 2, and Level B*/
 973 +        0x00000080,  /* Core 3, and Level A*/
 974 +        0x00000080,  /* Core 3, and Level B*/
 975 +        0x0000000c,  /* Level C */
 976 +};
 977 +
 978 +unsigned int set_index[9] = {
 979 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 980 +};
 981 +
 982 +unsigned int bank_index[9] = {
 983 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 984 +};
 985 +
 986 +struct mutex void_lockdown_proc;
 987 +
 988 +
 989 +/*
  991 + * Every page list contains a lock, a list, and a count of how many pages it stores.
 991 + */ 
 992 +struct color_group {
 993 +	spinlock_t lock;
 994 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
 995 +	struct list_head list;
 996 +	atomic_t nr_pages;
 997 +};
 998 +
 999 +
1000 +static struct color_group *color_groups;
1001 +
1002 +/*
1003 + * Naive function to count the number of 1's
1004 + */
1005 +unsigned int counting_one_set(unsigned int v)
1006 +{
1007 +//    unsigned int v; // count the number of bits set in v
1008 +    unsigned int c; // c accumulates the total bits set in v
1009 +
1010 +    for (c = 0; v; v >>= 1)
1011 +    {
1012 +        c += v & 1;
1013 +    }
1014 +    return c;
1015 +}
1016 +
1017 +unsigned int two_exp(unsigned int e)
1018 +{
1019 +    unsigned int v = 1;
1020 +    for (; e>0; e-- )
1021 +    {
1022 +        v=v*2;
1023 +    }
1024 +    return v;
1025 +}
1026 +
1027 +unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
1028 +{
1029 +    unsigned int pos = 0;
1030 +
1031 +    while(true)
1032 +    {
1033 +        if(index ==0 && (bitmask & 1)==1)
1034 +        {
1035 +            break;
1036 +        }
1037 +        if(index !=0 && (bitmask & 1)==1){
1038 +            index--;
1039 +        }
1040 +        pos++;
1041 +        bitmask = bitmask >>1;
1042 +
1043 +    }
1044 +    return pos;
1045 +}
1046 +
1047 +
1048 +
1049 +/* Decoding page color, 0~15 */ 
1050 +static inline unsigned int page_color(struct page *page)
1051 +{
1052 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
1053 +}
1054 +
1055 +/* Decoding page bank number, 0~7 */ 
1056 +static inline unsigned int page_bank(struct page *page)
1057 +{
1058 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
1059 +}
1060 +
1061 +static inline unsigned int page_list_index(struct page *page)
1062 +{
1063 +    unsigned int idx;  
1064 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
1065 +//    printk("address = %lx, ", page_to_phys(page));
1066 +//    printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx);
1067 +
1068 +    return idx; 
1069 +}
1070 +
1071 +
1072 +
1073 +/*
 1074 + * Determine the smallest page count across the page lists.
1075 + */
1076 +static unsigned long smallest_nr_pages(void)
1077 +{
1078 +	unsigned long i, min_pages;
1079 +	struct color_group *cgroup;
1080 +	cgroup = &color_groups[16*2];
1081 +	min_pages =atomic_read(&cgroup->nr_pages); 
1082 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
1083 +		cgroup = &color_groups[i];
1084 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
1085 +			min_pages = atomic_read(&cgroup->nr_pages);
1086 +	}
1087 +	return min_pages;
1088 +}
1089 +
1090 +static void show_nr_pages(void)
1091 +{
1092 +	unsigned long i;
1093 +	struct color_group *cgroup;
1094 +	printk("show nr pages***************************************\n");
1095 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
1096 +		cgroup = &color_groups[i];
 1097 +		printk("(%03ld) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
1098 +		if((i % 8) ==7){
1099 +		    printk("\n");
1100 +                }
1101 +	}
1102 +}
1103 +
1104 +/*
 1105 + * Add a page to the current pool.
1106 + */
1107 +void add_page_to_color_list(struct page *page)
1108 +{
1109 +	const unsigned long color = page_list_index(page);
1110 +	struct color_group *cgroup = &color_groups[color];
1111 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
1112 +	BUG_ON(page_count(page) > 1);
1113 +	spin_lock(&cgroup->lock);
1114 +	list_add_tail(&page->lru, &cgroup->list);
1115 +	atomic_inc(&cgroup->nr_pages);
1116 +	SetPageLRU(page);
1117 +	spin_unlock(&cgroup->lock);
1118 +}
1119 +
1120 +/*
1121 + * Replenish the page pool. 
 1122 + * If a newly allocated page is one we want, it is pushed onto the correct page list;
 1123 + * otherwise, it is freed.
1124 + */
1125 +static int do_add_pages(void)
1126 +{
1127 +	//printk("LITMUS do add pages\n");
1128 +	
1129 +	struct page *page, *page_tmp;
1130 +	LIST_HEAD(free_later);
1131 +	unsigned long color;
1132 +	int ret = 0;
1133 +	int i = 0;
1134 +	int free_counter = 0;
1135 +	unsigned long counter[128]= {0}; 
1136 +        
1137 +        //printk("Before refill : \n");
1138 +        //show_nr_pages();
1139 +
1140 +	// until all the page lists contain enough pages 
1141 +	//for (i =0; i<5; i++) {
1142 +	for (i=0; i< 1024*100;i++) {
1143 +	//while (smallest_nr_pages() < PAGES_PER_COLOR) {
1144 +       //         printk("smallest = %d\n", smallest_nr_pages());	
1145 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
1146 +	    //    page = alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
1147 +	
1148 +		if (unlikely(!page)) {
1149 +			printk(KERN_WARNING "Could not allocate pages.\n");
1150 +			ret = -ENOMEM;
1151 +			goto out;
1152 +		}
1153 +		color = page_list_index(page);
1154 +		counter[color]++;
1155 +	//	printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1156 +                //show_nr_pages();
1157 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
1158 +		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
1159 +		//if ( PAGES_PER_COLOR && color>=16*2) {
1160 +			add_page_to_color_list(page);
1161 +	//		printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
1162 +		} else{
1163 +			// Pages here will be freed later 
1164 +			list_add_tail(&page->lru, &free_later);
1165 +			free_counter++;
1166 +		        //list_del(&page->lru);
1167 +		//        __free_page(page);
1168 +	//		printk("useless page(%d) = color %x, bank %x\n", color,  page_color(page), page_bank(page));
1169 +		}
1170 +               //show_nr_pages();
1171 +                /*
1172 +                if(free_counter >= PAGES_PER_COLOR)
1173 +                {
1174 +                    printk("free unwanted page list eariler");
1175 +                    free_counter = 0;
1176 +	            list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1177 +		        list_del(&page->lru);
1178 +		        __free_page(page);
1179 +	            }
1180 +
1181 +                    show_nr_pages();
1182 +                }
1183 +                */
1184 +        }
1185 +/*        printk("page counter = \n");
1186 +        for (i=0; i<128; i++)
1187 +        {
1188 +            printk("(%03d) = %4d, ", i , counter[i]);
1189 +            if(i%8 == 7){
1190 +                printk("\n");
1191 +            }
1192 +
1193 +        }
1194 +*/	
1195 +        //printk("After refill : \n");
1196 +        //show_nr_pages();
1197 +#if 1
1198 +	// Free the unwanted pages
1199 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1200 +		list_del(&page->lru);
1201 +		__free_page(page);
1202 +	}
1203 +#endif
1204 +out:
1205 +        return ret;
1206 +}
1207 +
1208 +/*
 1209 + * Provide pages for replacement according to cache color.
 1210 + * This should be the only implementation here;
 1211 + * this function should not be accessed by others directly.
1212 + * 
1213 + */ 
 1214 +static struct page *new_alloc_page_color(unsigned long color, int do_refill)
1215 +{
1216 +//	printk("allocate new page color = %d\n", color);	
1217 +	struct color_group *cgroup;
1218 +	struct page *rPage = NULL;
1219 +		
 1220 +	if (color > (number_cachecolors * number_banks - 1)) {
1221 +		TRACE_CUR("Wrong color %lu\n", color);	
1222 +//		printk(KERN_WARNING "Wrong color %lu\n", color);
1223 +		goto out;
1224 +	}
1225 +
1226 +		
1227 +	cgroup = &color_groups[color];
1228 +	spin_lock(&cgroup->lock);
1229 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
1230 +		TRACE_CUR("No free %lu colored pages.\n", color);
1231 +//		printk(KERN_WARNING "no free %lu colored pages.\n", color);
1232 +		goto out_unlock;
1233 +	}
1234 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
1235 +	BUG_ON(page_count(rPage) > 1);
1236 +	//get_page(rPage);
1237 +	list_del(&rPage->lru);
1238 +	atomic_dec(&cgroup->nr_pages);
1239 +	ClearPageLRU(rPage);
1240 +out_unlock:
1241 +	spin_unlock(&cgroup->lock);
1242 +out:
1243 +	if( smallest_nr_pages() == 0 && do_refill == 1)
1244 +        {
1245 +		do_add_pages();
1246 +       //     printk("ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");        
1247 +        
1248 +        }
1249 +		
1250 +	return rPage;
1251 +}
1252 +
1253 +struct page* get_colored_page(unsigned long color)
1254 +{
1255 +	return new_alloc_page_color(color, 1);
1256 +}
1257 +
1258 +/*
1259 + * provide pages for replacement according to  
1260 + * node = 0 for Level A tasks in Cpu 0
1261 + * node = 1 for Level B tasks in Cpu 0
1262 + * node = 2 for Level A tasks in Cpu 1
1263 + * node = 3 for Level B tasks in Cpu 1
1264 + * node = 4 for Level A tasks in Cpu 2
1265 + * node = 5 for Level B tasks in Cpu 2
1266 + * node = 6 for Level A tasks in Cpu 3
1267 + * node = 7 for Level B tasks in Cpu 3
1268 + * node = 8 for Level C tasks 
1269 + */
1270 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
1271 +{
1272 +//	printk("allocate new page node = %d\n", node);	
1273 +//	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1274 +	struct color_group *cgroup;
1275 +	struct page *rPage = NULL;
1276 +	unsigned int color;
1277 +	
1278 +
1279 +    unsigned int idx = 0;
1280 +	do {
1281 +        idx += num_by_bitmask_index(set_partition[node], set_index[node]);
1282 +        idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
1283 +		rPage =  new_alloc_page_color(idx, 0);
1284 +	} while (rPage == NULL);
1285 +        
1286 +            
1287 +        set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
1288 +        bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
1289 +	return rPage; 
1290 +}
1291 +
1292 +
1293 +/*
1294 + * Reclaim pages.
1295 + */
1296 +void reclaim_page(struct page *page)
1297 +{
1298 +	const unsigned long color = page_list_index(page);
1299 +	unsigned long nr_reclaimed = 0;
1300 +	spin_lock(&reclaim_lock);
1301 +    	put_page(page);
1302 +	add_page_to_color_list(page);
1303 +
1304 +	spin_unlock(&reclaim_lock);
 1305 +	printk("Reclaimed page(%ld) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1306 +}
1307 +
1308 +
1309 +/*
1310 + * Initialize the numbers of banks and cache colors 
1311 + */ 
 1312 +static void __init init_variables(void)
1313 +{
1314 +	number_banks = counting_one_set(BANK_MASK); 
1315 +	number_banks = two_exp(number_banks); 
1316 +
1317 +	number_cachecolors = counting_one_set(CACHE_MASK);
1318 +	number_cachecolors = two_exp(number_cachecolors);
1319 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
1320 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
1321 +	mutex_init(&void_lockdown_proc);
1322 +	spin_lock_init(&reclaim_lock);
1323 +
1324 +}
1325 +
1326 +
1327 +/*
1328 + * Initialize the page pool 
1329 + */
1330 +static int __init init_color_groups(void)
1331 +{
1332 +	struct color_group *cgroup;
1333 +	unsigned long i;
1334 +	int err = 0;
1335 +
1336 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
1337 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
1338 +
1339 +	if (!color_groups) {
1340 +		printk(KERN_WARNING "Could not allocate color groups.\n");
1341 +		err = -ENOMEM;
1342 +	}else{
1343 +
1344 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
1345 +			cgroup = &color_groups[i];
1346 +			atomic_set(&cgroup->nr_pages, 0);
1347 +			INIT_LIST_HEAD(&cgroup->list);
1348 +			spin_lock_init(&cgroup->lock);
1349 +		}
1350 +	}
1351 +        return err;
1352 +}
1353 +
1354 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1355 +		size_t *lenp, loff_t *ppos)
1356 +{
1357 +	int ret = 0, i = 0;
1358 +	mutex_lock(&void_lockdown_proc);
1359 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1360 +	if (ret)
1361 +		goto out;
1362 +	if (write) {
1363 +            printk("New set Partition : \n");
1364 +	    for(i =0;i <9;i++)
1365 +            {
1366 +                set_index[i] = 0;
1367 +                printk("set[%d] = %x \n", i, set_partition[i]);
1368 +            }
1369 +	}
1370 +out:
1371 +	mutex_unlock(&void_lockdown_proc);
1372 +	return ret;
1373 +}
1374 +
1375 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1376 +		size_t *lenp, loff_t *ppos)
1377 +{
1378 +	int ret = 0, i = 0;
1379 +	mutex_lock(&void_lockdown_proc);
1380 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1381 +	if (ret)
1382 +		goto out;
1383 +	if (write) {
1384 +	    for(i =0;i <9;i++)
1385 +            {
1386 +                bank_index[i] = 0;
1387 +            }
1388 +	}
1389 +out:
1390 +	mutex_unlock(&void_lockdown_proc);
1391 +	return ret;
1392 +}
1393 +
1394 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1395 +		size_t *lenp, loff_t *ppos)
1396 +{
1397 +	int ret = 0, i = 0;
1398 +	mutex_lock(&void_lockdown_proc);
1399 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1400 +	if (ret)
1401 +		goto out;
1402 +	if (write) {
1403 +            show_nr_pages();
1404 +	}
1405 +out:
1406 +	mutex_unlock(&void_lockdown_proc);
1407 +	return ret;
1408 +}
1409 +
1410 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1411 +		size_t *lenp, loff_t *ppos)
1412 +{
1413 +	int ret = 0, i = 0;
1414 +	mutex_lock(&void_lockdown_proc);
1415 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1416 +	if (ret)
1417 +		goto out;
1418 +	if (write) {
1419 +            do_add_pages();
1420 +	}
1421 +out:
1422 +	mutex_unlock(&void_lockdown_proc);
1423 +	return ret;
1424 +}
1425 +
1426 +static struct ctl_table cache_table[] =
1427 +{
1428 +        
1429 +	{
1430 +		.procname	= "C0_LA_set",
1431 +		.mode		= 0666,
1432 +		.proc_handler	= set_partition_handler,
1433 +		.data		= &set_partition[0],
1434 +		.maxlen		= sizeof(set_partition[0]),
1435 +		.extra1		= &set_partition_min,
1436 +		.extra2		= &set_partition_max,
1437 +	},	
1438 +	{
1439 +		.procname	= "C0_LB_set",
1440 +		.mode		= 0666,
1441 +		.proc_handler	= set_partition_handler,
1442 +		.data		= &set_partition[1],
1443 +		.maxlen		= sizeof(set_partition[1]),
1444 +		.extra1		= &set_partition_min,
1445 +		.extra2		= &set_partition_max,
1446 +	},	
1447 +	{
1448 +		.procname	= "C1_LA_set",
1449 +		.mode		= 0666,
1450 +		.proc_handler	= set_partition_handler,
1451 +		.data		= &set_partition[2],
1452 +		.maxlen		= sizeof(set_partition[2]),
1453 +		.extra1		= &set_partition_min,
1454 +		.extra2		= &set_partition_max,
1455 +	},
1456 +	{
1457 +		.procname	= "C1_LB_set",
1458 +		.mode		= 0666,
1459 +		.proc_handler	= set_partition_handler,
1460 +		.data		= &set_partition[3],
1461 +		.maxlen		= sizeof(set_partition[3]),
1462 +		.extra1		= &set_partition_min,
1463 +		.extra2		= &set_partition_max,
1464 +	},
1465 +	{
1466 +		.procname	= "C2_LA_set",
1467 +		.mode		= 0666,
1468 +		.proc_handler	= set_partition_handler,
1469 +		.data		= &set_partition[4],
1470 +		.maxlen		= sizeof(set_partition[4]),
1471 +		.extra1		= &set_partition_min,
1472 +		.extra2		= &set_partition_max,
1473 +	},
1474 +	{
1475 +		.procname	= "C2_LB_set",
1476 +		.mode		= 0666,
1477 +		.proc_handler	= set_partition_handler,
1478 +		.data		= &set_partition[5],
1479 +		.maxlen		= sizeof(set_partition[5]),
1480 +		.extra1		= &set_partition_min,
1481 +		.extra2		= &set_partition_max,
1482 +	},
1483 +	{
1484 +		.procname	= "C3_LA_set",
1485 +		.mode		= 0666,
1486 +		.proc_handler	= set_partition_handler,
1487 +		.data		= &set_partition[6],
1488 +		.maxlen		= sizeof(set_partition[6]),
1489 +		.extra1		= &set_partition_min,
1490 +		.extra2		= &set_partition_max,
1491 +	},
1492 +	{
1493 +		.procname	= "C3_LB_set",
1494 +		.mode		= 0666,
1495 +		.proc_handler	= set_partition_handler,
1496 +		.data		= &set_partition[7],
1497 +		.maxlen		= sizeof(set_partition[7]),
1498 +		.extra1		= &set_partition_min,
1499 +		.extra2		= &set_partition_max,
1500 +	},	
1501 +	{
1502 +		.procname	= "Call_LC_set",
1503 +		.mode		= 0666,
1504 +		.proc_handler	= set_partition_handler,
1505 +		.data		= &set_partition[8],
1506 +		.maxlen		= sizeof(set_partition[8]),
1507 +		.extra1		= &set_partition_min,
1508 +		.extra2		= &set_partition_max,
1509 +	},	
1510 +	{
1511 +		.procname	= "C0_LA_bank",
1512 +		.mode		= 0666,
1513 +		.proc_handler	= bank_partition_handler,
1514 +		.data		= &bank_partition[0],
1515 +		.maxlen		= sizeof(set_partition[0]),
1516 +		.extra1		= &bank_partition_min,
1517 +		.extra2		= &bank_partition_max,
1518 +	},
1519 +	{
1520 +		.procname	= "C0_LB_bank",
1521 +		.mode		= 0666,
1522 +		.proc_handler	= bank_partition_handler,
1523 +		.data		= &bank_partition[1],
1524 +		.maxlen		= sizeof(set_partition[1]),
1525 +		.extra1		= &bank_partition_min,
1526 +		.extra2		= &bank_partition_max,
1527 +	},		
1528 +	{
1529 +		.procname	= "C1_LA_bank",
1530 +		.mode		= 0666,
1531 +		.proc_handler	= bank_partition_handler,
1532 +		.data		= &bank_partition[2],
1533 +		.maxlen		= sizeof(set_partition[2]),
1534 +		.extra1		= &bank_partition_min,
1535 +		.extra2		= &bank_partition_max,
1536 +	},
1537 +	{
1538 +		.procname	= "C1_LB_bank",
1539 +		.mode		= 0666,
1540 +		.proc_handler	= bank_partition_handler,
1541 +		.data		= &bank_partition[3],
1542 +		.maxlen		= sizeof(set_partition[3]),
1543 +		.extra1		= &bank_partition_min,
1544 +		.extra2		= &bank_partition_max,
1545 +	},
1546 +	{
1547 +		.procname	= "C2_LA_bank",
1548 +		.mode		= 0666,
1549 +		.proc_handler	= bank_partition_handler,
1550 +		.data		= &bank_partition[4],
1551 +		.maxlen		= sizeof(set_partition[4]),
1552 +		.extra1		= &bank_partition_min,
1553 +		.extra2		= &bank_partition_max,
1554 +	},	
1555 +	{
1556 +		.procname	= "C2_LB_bank",
1557 +		.mode		= 0666,
1558 +		.proc_handler	= bank_partition_handler,
1559 +		.data		= &bank_partition[5],
1560 +		.maxlen		= sizeof(set_partition[5]),
1561 +		.extra1		= &bank_partition_min,
1562 +		.extra2		= &bank_partition_max,
1563 +	},		
1564 +	{
1565 +		.procname	= "C3_LA_bank",
1566 +		.mode		= 0666,
1567 +		.proc_handler	= bank_partition_handler,
1568 +		.data		= &bank_partition[6],
1569 +		.maxlen		= sizeof(set_partition[6]),
1570 +		.extra1		= &bank_partition_min,
1571 +		.extra2		= &bank_partition_max,
1572 +	},	
1573 +	{
1574 +		.procname	= "C3_LB_bank",
1575 +		.mode		= 0666,
1576 +		.proc_handler	= bank_partition_handler,
1577 +		.data		= &bank_partition[7],
1578 +		.maxlen		= sizeof(set_partition[7]),
1579 +		.extra1		= &bank_partition_min,
1580 +		.extra2		= &bank_partition_max,
1581 +	},	
1582 +	{
1583 +		.procname	= "Call_LC_bank",
1584 +		.mode		= 0666,
1585 +		.proc_handler	= bank_partition_handler,
1586 +		.data		= &bank_partition[8],
1587 +		.maxlen		= sizeof(set_partition[8]),
1588 +		.extra1		= &bank_partition_min,
1589 +		.extra2		= &bank_partition_max,
1590 +	},	
1591 +	{
1592 +		.procname	= "show_page_pool",
1593 +		.mode		= 0666,
1594 +		.proc_handler	= show_page_pool_handler,
1595 +		.data		= &show_page_pool,
1596 +		.maxlen		= sizeof(show_page_pool),
1597 +	},		{
1598 +		.procname	= "refill_page_pool",
1599 +		.mode		= 0666,
1600 +		.proc_handler	= refill_page_pool_handler,
1601 +		.data		= &refill_page_pool,
1602 +		.maxlen		= sizeof(refill_page_pool),
1603 +	},	
1604 +	{ }
1605 +};
1606 +
1607 +static struct ctl_table litmus_dir_table[] = {
1608 +	{
1609 +		.procname	= "litmus",
1610 + 		.mode		= 0555,
1611 +		.child		= cache_table,
1612 +	},
1613 +	{ }
1614 +};
1615 +
1616 +
1617 +static struct ctl_table_header *litmus_sysctls;
1618 +
1619 +
1620 +/*
 1621 + * Initialize this proc interface
1622 + */
1623 +static int __init litmus_color_init(void)
1624 +{
1625 +	int err=0;
1626 +        printk("Init bankproc.c\n");
1627 +
1628 +	init_variables();
1629 +
1630 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
1631 +
1632 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
1633 +	if (!litmus_sysctls) {
1634 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
1635 +		err = -EFAULT;
1636 +		goto out;
1637 +	}
1638 +
1639 +	init_color_groups();			
1640 +	do_add_pages();
1641 +
1642 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
1643 +out:
1644 +	return err;
1645 +}
1646 +
1647 +module_init(litmus_color_init);
1648 +
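
To make the imx6 address decoding above concrete, here is a worked instance of page_list_index() with CACHE_MASK = 0x0000f000 (shift 12) and BANK_MASK = 0x38000000 (shift 27), where number_cachecolors = 16:

    /* Worked example: physical address 0x12345000 */
    unsigned int color = (0x12345000 & 0x0000f000) >> 12; /* = 5  */
    unsigned int bank  = (0x12345000 & 0x38000000) >> 27; /* = 2  */
    unsigned int idx   = color + bank * 16;               /* = 37 */

So a page at that physical address is kept on page list 37 of the NUM_PAGE_LIST = 128 pools.
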
1649 diff --git a/litmus/budget.c b/litmus/budget.c
1650 index 47bf78a..d67f4b3 100644
1651 --- a/litmus/budget.c
1652 +++ b/litmus/budget.c
1653 @@ -1,9 +1,11 @@
1654  #include <linux/sched.h>
1655  #include <linux/percpu.h>
1656  #include <linux/hrtimer.h>
1657 +#include <linux/uaccess.h>
1658  
1659  #include <litmus/litmus.h>
1660  #include <litmus/preempt.h>
1661 +#include <litmus/sched_plugin.h>
1662  
1663  #include <litmus/budget.h>
1664  
1665 @@ -113,4 +115,54 @@ static int __init init_budget_enforcement(void)
1666  	return 0;
1667  }
1668  
1669 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining)
1670 +{
1671 +	struct task_struct *t = current;
1672 +	unsigned long flags;
1673 +	s64 delta;
1674 +
1675 +	local_irq_save(flags);
1676 +
1677 +	delta = sched_clock_cpu(smp_processor_id()) - t->se.exec_start;
1678 +	if (delta < 0)
1679 +		delta = 0;
1680 +
1681 +	TRACE_CUR("current_budget: sc:%llu start:%llu lt_t:%llu delta:%lld exec-time:%llu rem:%llu\n",
1682 +		sched_clock_cpu(smp_processor_id()), t->se.exec_start,
1683 +		litmus_clock(), delta,
1684 +		tsk_rt(t)->job_params.exec_time,
1685 +		budget_remaining(t));
1686 +
1687 +	if (used_so_far)
1688 +		*used_so_far = tsk_rt(t)->job_params.exec_time + delta;
1689 +
1690 +	if (remaining) {
1691 +		*remaining = budget_remaining(t);
1692 +		if (*remaining > delta)
1693 +			*remaining -= delta;
1694 +		else
1695 +			*remaining = 0;
1696 +	}
1697 +
1698 +	local_irq_restore(flags);
1699 +}
1700 +
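     +/*
     + * sys_get_current_budget: report the caller's budget usage. Stores the
     + * time consumed so far and the remaining budget through the user
     + * pointers (either may be NULL); non-real-time tasks get zeros.
     + */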
1701 +asmlinkage long sys_get_current_budget(
1702 +	lt_t __user * _expended,
1703 +	lt_t __user *_remaining)
1704 +{
1705 +	lt_t expended = 0, remaining = 0;
1706 +
1707 +	if (is_realtime(current))
1708 +		litmus->current_budget(&expended, &remaining);
1709 +
1710 +	if (_expended && put_user(expended, _expended))
1711 +		return -EFAULT;
1712 +
1713 +	if (_remaining && put_user(remaining, _remaining))
1714 +		return -EFAULT;
1715 +
1716 +	return 0;
1717 +}
1718 +
1719  module_init(init_budget_enforcement);
1720 diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
1721 new file mode 100644
1722 index 0000000..15c1b01
1723 --- /dev/null
1724 +++ b/litmus/cache_proc.c
1725 @@ -0,0 +1,1336 @@
1726 +#include <linux/uaccess.h>
1728 +#include <linux/init.h>
1729 +#include <linux/types.h>
1730 +#include <linux/kernel.h>
1731 +#include <linux/module.h>
1732 +#include <linux/sysctl.h>
1733 +#include <linux/slab.h>
1734 +#include <linux/io.h>
1735 +#include <linux/mutex.h>
1736 +#include <linux/time.h>
1737 +#include <linux/random.h>
1738 +
1739 +#include <litmus/litmus_proc.h>
1740 +#include <litmus/sched_trace.h>
1741 +#include <litmus/cache_proc.h>
1742 +#include <litmus/mc2_common.h>
1743 +
1744 +#include <asm/hardware/cache-l2x0.h>
1745 +#include <asm/cacheflush.h>
1746 +
1747 +
1748 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
1749 +#define LOCK_ALL        (~UNLOCK_ALL)
1750 +#define MAX_NR_WAYS	16
1751 +#define MAX_NR_COLORS	16
1752 +#define CACHELINE_SIZE 32
1753 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
1754 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
1755 +
1756 +typedef struct cacheline
1757 +{
1758 +        int line[INTS_IN_CACHELINE];
1759 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
1760 +
1761 +void mem_lock(u32 lock_val, int cpu);
1762 +
1763 +/*
1764 + * unlocked_way[i] : allocation can occur in way i
1765 + *
1766 + * 0 = allocation can occur in the corresponding way
1767 + * 1 = allocation cannot occur in the corresponding way
1768 + */
1769 +u32 unlocked_way[MAX_NR_WAYS]  = {
1770 +	0xFFFFFFFE, /* way 0 unlocked */
1771 +	0xFFFFFFFD,
1772 +	0xFFFFFFFB,
1773 +	0xFFFFFFF7,
1774 +	0xFFFFFFEF, /* way 4 unlocked */
1775 +	0xFFFFFFDF,
1776 +	0xFFFFFFBF,
1777 +	0xFFFFFF7F,
1778 +	0xFFFFFEFF, /* way 8 unlocked */
1779 +	0xFFFFFDFF,
1780 +	0xFFFFFBFF,
1781 +	0xFFFFF7FF,
1782 +	0xFFFFEFFF, /* way 12 unlocked */
1783 +	0xFFFFDFFF,
1784 +	0xFFFFBFFF,
1785 +	0xFFFF7FFF,
1786 +};
1787 +
1788 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
1789 +	0x0000FFFF, /* all ways are locked. usable = 0 */
1790 +	0x0000FFFE, /* ways 0..0 unlocked. usable = 1 */
1791 +	0x0000FFFC,
1792 +	0x0000FFF8,
1793 +	0x0000FFF0,
1794 +	0x0000FFE0,
1795 +	0x0000FFC0,
1796 +	0x0000FF80,
1797 +	0x0000FF00,
1798 +	0x0000FE00,
1799 +	0x0000FC00,
1800 +	0x0000F800,
1801 +	0x0000F000,
1802 +	0x0000E000,
1803 +	0x0000C000,
1804 +	0x00008000,
1805 +	0x00000000, /* ways 0..15 unlocked. usable = 16 */
1806 +};
1807 +
1808 +u32 way_partition[4] = {
1809 +	0xfffffff0, /* cpu0 */
1810 +	0xffffff0f, /* cpu1 */
1811 +	0xfffff0ff, /* cpu2 */
1812 +	0xffff0fff, /* cpu3 */
1813 +};
1814 +
1815 +u32 way_partitions[9] = {
1816 +	0xffff0003, /* cpu0 A */
1817 +	0xffff0003, /* cpu0 B */
1818 +	0xffff000C, /* cpu1 A */
1819 +	0xffff000C, /* cpu1 B */
1820 +	0xffff0030, /* cpu2 A */
1821 +	0xffff0030, /* cpu2 B */
1822 +	0xffff00C0, /* cpu3 A */
1823 +	0xffff00C0, /* cpu3 B */
1824 +	0xffffff00, /* lv C */
1825 +};
1826 +
1827 +u32 prev_lockdown_d_reg[5] = {
1828 +	0x0000FF00,
1829 +	0x0000FF00,
1830 +	0x0000FF00,
1831 +	0x0000FF00,
1832 +	0x000000FF, /* share with level-C */
1833 +};
1834 +
1835 +u32 prev_lockdown_i_reg[5] = {
1836 +	0x0000FF00,
1837 +	0x0000FF00,
1838 +	0x0000FF00,
1839 +	0x0000FF00,
1840 +	0x000000FF, /* share with level-C */
1841 +};
1842 +
1843 +u32 prev_lbm_i_reg[8] = {
1844 +	0x00000000,
1845 +	0x00000000,
1846 +	0x00000000,
1847 +	0x00000000,
1848 +	0x00000000,
1849 +	0x00000000,
1850 +	0x00000000,
1851 +	0x00000000,
1852 +};
1853 +
1854 +u32 prev_lbm_d_reg[8] = {
1855 +	0x00000000,
1856 +	0x00000000,
1857 +	0x00000000,
1858 +	0x00000000,
1859 +	0x00000000,
1860 +	0x00000000,
1861 +	0x00000000,
1862 +	0x00000000,
1863 +};
1864 +
1865 +static void __iomem *cache_base;
1866 +static void __iomem *lockreg_d;
1867 +static void __iomem *lockreg_i;
1868 +
1869 +static u32 cache_id;
1870 +
1871 +struct mutex actlr_mutex;
1872 +struct mutex l2x0_prefetch_mutex;
1873 +struct mutex lockdown_proc;
1874 +static u32 way_partition_min;
1875 +static u32 way_partition_max;
1876 +
1877 +static int zero = 0;
1878 +static int one = 1;
1879 +
1880 +static int l1_prefetch_proc;
1881 +static int l2_prefetch_hint_proc;
1882 +static int l2_double_linefill_proc;
1883 +static int l2_data_prefetch_proc;
1884 +static int os_isolation;
1885 +static int use_part;
1886 +
1887 +u32 lockdown_reg[9] = {
1888 +	0x00000000,
1889 +	0x00000000,
1890 +	0x00000000,
1891 +	0x00000000,
1892 +	0x00000000,
1893 +	0x00000000,
1894 +	0x00000000,
1895 +	0x00000000,
1896 +};
1897 +	
1898 +
1899 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
1900 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
1901 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1902 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
1903 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
1904 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1905 +
1906 +int lock_all;
1907 +int nr_lockregs;
1908 +static raw_spinlock_t cache_lock;
1909 +static raw_spinlock_t prefetch_lock;
1910 +static void ***flusher_pages = NULL;
1911 +
1912 +extern void l2c310_flush_all(void);
1913 +
1914 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
1915 +{
1916 +	/* wait for cache operation by line or way to complete */
1917 +	while (readl_relaxed(reg) & mask)
1918 +		cpu_relax();
1919 +}
1920 +
1921 +#ifdef CONFIG_CACHE_L2X0
1922 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
1923 +{
1924 +	/* cache operations by line are atomic on PL310 */
1925 +}
1926 +#else
1927 +#define cache_wait	cache_wait_way
1928 +#endif
1929 +
1930 +static inline void cache_sync(void)
1931 +{
1932 +	void __iomem *base = cache_base;
1933 +
1934 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
1935 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
1936 +}
1937 +
1938 +static void print_lockdown_registers(int cpu)
1939 +{
1940 +	int i;
1941 +	/* print the four per-CPU lockdown register pairs */
1942 +	for (i = 0; i < 4; i++) {
1943 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
1944 +				i, readl_relaxed(ld_d_reg(i)));
1945 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
1946 +				i, readl_relaxed(ld_i_reg(i)));
1947 +	}
1948 +}
1949 +
1950 +static void test_lockdown(void *ignore)
1951 +{
1952 +	int i, cpu;
1953 +
1954 +	cpu = smp_processor_id();
1955 +	printk("Start lockdown test on CPU %d.\n", cpu);
1956 +
1957 +	for (i = 0; i < nr_lockregs; i++) {
1958 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
1959 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
1960 +	}
1961 +
1962 +	printk("Lockdown initial state:\n");
1963 +	print_lockdown_registers(cpu);
1964 +	printk("---\n");
1965 +
1966 +	for (i = 0; i < nr_lockregs; i++) {
1967 +		writel_relaxed(1, ld_d_reg(i));
1968 +		writel_relaxed(2, ld_i_reg(i));
1969 +	}
1970 +	printk("Lockdown all data=1 instr=2:\n");
1971 +	print_lockdown_registers(cpu);
1972 +	printk("---\n");
1973 +
1974 +	for (i = 0; i < nr_lockregs; i++) {
1975 +		writel_relaxed((1 << i), ld_d_reg(i));
1976 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
1977 +	}
1978 +	printk("Lockdown varies:\n");
1979 +	print_lockdown_registers(cpu);
1980 +	printk("---\n");
1981 +
1982 +	for (i = 0; i < nr_lockregs; i++) {
1983 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
1984 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
1985 +	}
1986 +	printk("Lockdown all zero:\n");
1987 +	print_lockdown_registers(cpu);
1988 +
1989 +	printk("End lockdown test.\n");
1990 +}
1991 +
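     +/*
     + * Record the outer cache's base address and ID and initialize the
     + * locks used by the lockdown proc handlers. A PL310 provides eight
     + * lockdown register pairs (nr_lockregs = 8). Finishes by running a
     + * quick read/write self-test on the lockdown registers.
     + */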
1992 +void litmus_setup_lockdown(void __iomem *base, u32 id)
1993 +{
1994 +	cache_base = base;
1995 +	cache_id = id;
1996 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
1997 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
1998 +    
1999 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
2000 +		nr_lockregs = 8;
2001 +	} else {
2002 +		printk("Unknown cache ID!\n");
2003 +		nr_lockregs = 1;
2004 +	}
2005 +	
2006 +	mutex_init(&actlr_mutex);
2007 +	mutex_init(&l2x0_prefetch_mutex);
2008 +	mutex_init(&lockdown_proc);
2009 +	raw_spin_lock_init(&cache_lock);
2010 +	raw_spin_lock_init(&prefetch_lock);
2011 +	
2012 +	test_lockdown(NULL);
2013 +}
2014 +
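     +/*
     + * sysctl handler for the per-CPU way partitions. way_partitions[]
     + * stores "1 = way usable" masks, while the hardware lockdown bits
     + * mean "1 = no allocation", hence the inversion on write. Only the
     + * even-indexed (level-A) entries are programmed here, and the L2 is
     + * flushed afterwards with interrupts off.
     + */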
2015 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2016 +		size_t *lenp, loff_t *ppos)
2017 +{
2018 +	int ret = 0, i;
2019 +	unsigned long flags;
2020 +	
2021 +	mutex_lock(&lockdown_proc);
2022 +	
2023 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2024 +	if (ret)
2025 +		goto out;
2026 +	
2027 +	if (write) {
2028 +		printk("Way-partition settings:\n");
2029 +		for (i = 0; i < 9; i++) {
2030 +			printk("0x%08X\n", way_partitions[i]);
2031 +		}
2032 +		for (i = 0; i < 4; i++) {
2033 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2034 +				       i * L2X0_LOCKDOWN_STRIDE);
2035 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2036 +				       i * L2X0_LOCKDOWN_STRIDE);
2037 +		}
2038 +	}
2039 +	
2040 +	local_irq_save(flags);
2041 +	print_lockdown_registers(smp_processor_id());
2042 +	l2c310_flush_all();
2043 +	local_irq_restore(flags);
2044 +out:
2045 +	mutex_unlock(&lockdown_proc);
2046 +	return ret;
2047 +}
2048 +
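     +/*
     + * sysctl handler for lock_all: writing 1 sets all 16 way bits in every
     + * lockdown register pair (no further L2 allocation at all); writing 0
     + * clears them. The new state is printed and the L2 is flushed.
     + */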
2049 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
2050 +		size_t *lenp, loff_t *ppos)
2051 +{
2052 +	int ret = 0, i;
2053 +	unsigned long flags;
2054 +	
2055 +	mutex_lock(&lockdown_proc);
2056 +	
2057 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2058 +	if (ret)
2059 +		goto out;
2060 +	
2061 +	if (write && lock_all == 1) {
2062 +		for (i = 0; i < nr_lockregs; i++) {
2063 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2064 +				       i * L2X0_LOCKDOWN_STRIDE);
2065 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2066 +				       i * L2X0_LOCKDOWN_STRIDE);
2067 +		}
2068 +/*		
2069 +		for (i = 0; i < nr_lockregs;  i++) {
2070 +			barrier();
2071 +			mem_lock(LOCK_ALL, i);
2072 +			barrier();
2073 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
2074 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
2075 +		}
2076 +*/		
2077 +	}
2078 +	if (write && lock_all == 0) {
2079 +		for (i = 0; i < nr_lockregs; i++) {
2080 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2081 +				       i * L2X0_LOCKDOWN_STRIDE);
2082 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2083 +				       i * L2X0_LOCKDOWN_STRIDE);
2084 +		}
2085 +
2086 +	}
2087 +
2088 +	local_irq_save(flags);
2089 +	print_lockdown_registers(smp_processor_id());
2090 +	l2c310_flush_all();
2091 +	local_irq_restore(flags);
2092 +out:
2093 +	mutex_unlock(&lockdown_proc);
2094 +	return ret;
2095 +}
2096 +
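     +/* Write @lock_val into both the D and I lockdown registers of @cpu. */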
2097 +void cache_lockdown(u32 lock_val, int cpu)
2098 +{
2099 +	__asm__ __volatile__ (
2100 +"	str	%[lockval], [%[dcachereg]]\n"
2101 +"	str	%[lockval], [%[icachereg]]\n"
2102 +	: 
2103 +	: [dcachereg] "r" (ld_d_reg(cpu)),
2104 +	  [icachereg] "r" (ld_i_reg(cpu)),
2105 +	  [lockval] "r" (lock_val)
2106 +	: "cc");
2107 +}
2108 +
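     +/*
     + * Apply the way partition of criticality level @lv on CPU @cpu by
     + * programming its lockdown registers; level C (and anything higher)
     + * uses the shared way_partitions[8] mask. Does nothing when lock_all
     + * is set or partitioning is disabled via use_part.
     + */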
2109 +void do_partition(enum crit_level lv, int cpu)
2110 +{
2111 +	u32 regs;
2112 +	unsigned long flags;
2113 +	
2114 +	if (lock_all || !use_part)
2115 +		return;
2116 +	raw_spin_lock_irqsave(&cache_lock, flags);
2117 +	switch(lv) {
2118 +		case CRIT_LEVEL_A:
2119 +			regs = ~way_partitions[cpu*2];
2120 +			regs &= 0x0000ffff;
2121 +			break;
2122 +		case CRIT_LEVEL_B:
2123 +			regs = ~way_partitions[cpu*2+1];
2124 +			regs &= 0x0000ffff;
2125 +			break;
2126 +		case CRIT_LEVEL_C:
2127 +		case NUM_CRIT_LEVELS:
2128 +			regs = ~way_partitions[8];
2129 +			regs &= 0x0000ffff;
2130 +			break;
2131 +		default:
2132 +			BUG();
2133 +
2134 +	}
2135 +	barrier();
2136 +
2137 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2138 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2139 +	barrier();
2140 +
2141 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2142 +}
2143 +
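     +/*
     + * Set both lockdown registers of @cpu to @val. The special value
     + * 0xffffffff instead re-applies the level-A way partition on all
     + * four CPUs.
     + */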
2144 +void lock_cache(int cpu, u32 val)
2145 +{
2146 +	unsigned long flags;
2147 +	
2148 +	local_irq_save(flags);
2149 +	if (val != 0xffffffff) {
2150 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2151 +					   cpu * L2X0_LOCKDOWN_STRIDE);
2152 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2153 +					   cpu * L2X0_LOCKDOWN_STRIDE);
2154 +	}
2155 +	else {
2156 +		int i;
2157 +		for (i = 0; i < 4; i++)
2158 +			do_partition(CRIT_LEVEL_A, i);
2159 +	}
2160 +	local_irq_restore(flags);
2161 +}
2162 +
2163 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2164 +		size_t *lenp, loff_t *ppos)
2165 +{
2166 +	int ret = 0;
2167 +	
2168 +	mutex_lock(&lockdown_proc);
2169 +
2170 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2171 +	if (ret)
2172 +		goto out;
2173 +	
2174 +
2175 +	printk("USE_PART HANDLER = %d\n", use_part);
2176 +
2177 +out:
2178 +	mutex_unlock(&lockdown_proc);
2179 +	return ret;
2180 +}
2181 +
2182 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2183 +		size_t *lenp, loff_t *ppos)
2184 +{
2185 +	int ret = 0;
2186 +	
2187 +	mutex_lock(&lockdown_proc);
2188 +	
2189 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2190 +	if (ret)
2191 +		goto out;
2192 +	
2193 +
2194 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
2195 +
2196 +out:
2197 +	mutex_unlock(&lockdown_proc);
2198 +	return ret;
2199 +}
2200 +
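     +/*
     + * sysctl handler for lockdown_reg_0..3: a write to any of them
     + * reprograms all nr_lockregs D/I register pairs from lockdown_reg[].
     + */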
2201 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
2202 +		size_t *lenp, loff_t *ppos)
2203 +{
2204 +	int ret = 0, i;
2205 +	
2206 +	mutex_lock(&lockdown_proc);
2207 +	
2208 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2209 +	if (ret)
2210 +		goto out;
2211 +	
2212 +	if (write) {
2213 +		for (i = 0; i < nr_lockregs; i++) {
2214 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2215 +				       i * L2X0_LOCKDOWN_STRIDE);
2216 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2217 +				       i * L2X0_LOCKDOWN_STRIDE);
2218 +		}
2219 +	}
2220 +
2221 +out:
2222 +	mutex_unlock(&lockdown_proc);
2223 +	return ret;
2224 +}
2225 +
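     +/*
     + * sysctl handler for lockdown_regs: broadcast the single value in
     + * lockdown_reg[8] to every D/I lockdown register pair.
     + */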
2226 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
2227 +		size_t *lenp, loff_t *ppos)
2228 +{
2229 +	int ret = 0, i;
2230 +	
2231 +	mutex_lock(&lockdown_proc);
2232 +	
2233 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2234 +	if (ret)
2235 +		goto out;
2236 +	
2237 +	if (write) {
2238 +		for (i = 0; i < nr_lockregs; i++) {
2239 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2240 +				       i * L2X0_LOCKDOWN_STRIDE);
2241 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2242 +				       i * L2X0_LOCKDOWN_STRIDE);
2243 +		}
2244 +	}
2245 +
2246 +out:
2247 +	mutex_unlock(&lockdown_proc);
2248 +	return ret;
2249 +}
2250 +
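     +/*
     + * With os_isolation enabled, IRQ handlers run under the level-C way
     + * mask so that interrupt processing only allocates into level-C ways;
     + * the interrupted context's lockdown values are saved here and put
     + * back by exit_irq_mode().
     + */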
2251 +inline void enter_irq_mode(void)
2252 +{
2253 +	int cpu = smp_processor_id();
2254 +
2255 +	if (os_isolation == 0)
2256 +		return;	
2257 +	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2258 +	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2259 +	
2260 +	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
2261 +	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
2262 +}
2263 +
2264 +inline void exit_irq_mode(void)
2265 +{
2266 +	int cpu = smp_processor_id();
2267 +
2268 +	if (os_isolation == 0)
2269 +		return;
2270 +	writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
2271 +	writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));	
2272 +}
2273 +
2274 +/* Operate on the Cortex-A9's ACTLR register */
2275 +#define ACTLR_L2_PREFETCH_HINT	(1 << 1)
2276 +#define ACTLR_L1_PREFETCH	(1 << 2)
2277 +
2278 +/*
2279 + * Change the ACTLR.
2280 + * @mode	- If 1 (0), set (clear) the bit given in @mask in the ACTLR.
2281 + * @mask	- A mask in which one bit is set to operate on the ACTLR.
2282 + */
2283 +static void actlr_change(int mode, int mask)
2284 +{
2285 +	u32 orig_value, new_value, reread_value;
2286 +
2287 +	if (0 != mode && 1 != mode) {
2288 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2289 +				__func__);
2290 +		return;
2291 +	}
2292 +
2293 +	/* get the original value */
2294 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
2295 +
2296 +	if (0 == mode)
2297 +		new_value = orig_value & ~(mask);
2298 +	else
2299 +		new_value = orig_value | mask;
2300 +
2301 +	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
2302 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
2303 +
2304 +	printk("ACTLR: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2305 +			orig_value, new_value, reread_value);
2306 +}
2307 +
2308 +int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
2309 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2310 +{
2311 +	int ret, mode;
2312 +
2313 +	mutex_lock(&actlr_mutex);
2314 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2315 +
2316 +	if (!ret && write) {
2317 +		mode = *((int*)table->data);
2318 +		actlr_change(mode, ACTLR_L1_PREFETCH);
2319 +	}
2320 +	mutex_unlock(&actlr_mutex);
2321 +
2322 +	return ret;
2323 +}
2324 +
2325 +int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
2326 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2327 +{
2328 +	int ret, mode;
2329 +
2330 +	mutex_lock(&actlr_mutex);
2331 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2332 +	if (!ret && write) {
2333 +		mode = *((int*)table->data);
2334 +		actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
2335 +	}
2336 +	mutex_unlock(&actlr_mutex);
2337 +
2338 +	return ret;
2339 +}
2340 +
2341 +
2342 +/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
2343 +#define L2X0_PREFETCH_DOUBLE_LINEFILL	(1 << 30)
2344 +#define L2X0_PREFETCH_INST_PREFETCH	(1 << 29)
2345 +#define L2X0_PREFETCH_DATA_PREFETCH	(1 << 28)
2346 +static void l2x0_prefetch_change(int mode, int mask)
2347 +{
2348 +	u32 orig_value, new_value, reread_value;
2349 +
2350 +	if (0 != mode && 1 != mode) {
2351 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2352 +				__func__);
2353 +		return;
2354 +	}
2355 +
2356 +	orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2357 +
2358 +	if (0 == mode)
2359 +		new_value = orig_value & ~(mask);
2360 +	else
2361 +		new_value = orig_value | mask;
2362 +
2363 +	writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
2364 +	reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2365 +
2366 +	printk("l2x0 prefetch: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2367 +			orig_value, new_value, reread_value);
2368 +}
2369 +
2370 +int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
2371 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2372 +{
2373 +	int ret, mode;
2374 +
2375 +	mutex_lock(&l2x0_prefetch_mutex);
2376 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2377 +	if (!ret && write) {
2378 +		mode = *((int*)table->data);
2379 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
2380 +	}
2381 +	mutex_unlock(&l2x0_prefetch_mutex);
2382 +
2383 +	return ret;
2384 +}
2385 +
2386 +int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
2387 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2388 +{
2389 +	int ret, mode;
2390 +
2391 +	mutex_lock(&l2x0_prefetch_mutex);
2392 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2393 +	if (!ret && write) {
2394 +		mode = *((int*)table->data);
2395 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
2396 +	}
2397 +	mutex_unlock(&l2x0_prefetch_mutex);
2398 +
2399 +	return ret;
2400 +}
2401 +
2402 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2403 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2404 +
2405 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2406 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2407 +		
2408 +static struct ctl_table cache_table[] =
2409 +{
2410 +	{
2411 +		.procname	= "C0_LA_way",
2412 +		.mode		= 0666,
2413 +		.proc_handler	= way_partition_handler,
2414 +		.data		= &way_partitions[0],
2415 +		.maxlen		= sizeof(way_partitions[0]),
2416 +		.extra1		= &way_partition_min,
2417 +		.extra2		= &way_partition_max,
2418 +	},	
2419 +	{
2420 +		.procname	= "C0_LB_way",
2421 +		.mode		= 0666,
2422 +		.proc_handler	= way_partition_handler,
2423 +		.data		= &way_partitions[1],
2424 +		.maxlen		= sizeof(way_partitions[1]),
2425 +		.extra1		= &way_partition_min,
2426 +		.extra2		= &way_partition_max,
2427 +	},	
2428 +	{
2429 +		.procname	= "C1_LA_way",
2430 +		.mode		= 0666,
2431 +		.proc_handler	= way_partition_handler,
2432 +		.data		= &way_partitions[2],
2433 +		.maxlen		= sizeof(way_partitions[2]),
2434 +		.extra1		= &way_partition_min,
2435 +		.extra2		= &way_partition_max,
2436 +	},
2437 +	{
2438 +		.procname	= "C1_LB_way",
2439 +		.mode		= 0666,
2440 +		.proc_handler	= way_partition_handler,
2441 +		.data		= &way_partitions[3],
2442 +		.maxlen		= sizeof(way_partitions[3]),
2443 +		.extra1		= &way_partition_min,
2444 +		.extra2		= &way_partition_max,
2445 +	},
2446 +	{
2447 +		.procname	= "C2_LA_way",
2448 +		.mode		= 0666,
2449 +		.proc_handler	= way_partition_handler,
2450 +		.data		= &way_partitions[4],
2451 +		.maxlen		= sizeof(way_partitions[4]),
2452 +		.extra1		= &way_partition_min,
2453 +		.extra2		= &way_partition_max,
2454 +	},
2455 +	{
2456 +		.procname	= "C2_LB_way",
2457 +		.mode		= 0666,
2458 +		.proc_handler	= way_partition_handler,
2459 +		.data		= &way_partitions[5],
2460 +		.maxlen		= sizeof(way_partitions[5]),
2461 +		.extra1		= &way_partition_min,
2462 +		.extra2		= &way_partition_max,
2463 +	},
2464 +	{
2465 +		.procname	= "C3_LA_way",
2466 +		.mode		= 0666,
2467 +		.proc_handler	= way_partition_handler,
2468 +		.data		= &way_partitions[6],
2469 +		.maxlen		= sizeof(way_partitions[6]),
2470 +		.extra1		= &way_partition_min,
2471 +		.extra2		= &way_partition_max,
2472 +	},
2473 +	{
2474 +		.procname	= "C3_LB_way",
2475 +		.mode		= 0666,
2476 +		.proc_handler	= way_partition_handler,
2477 +		.data		= &way_partitions[7],
2478 +		.maxlen		= sizeof(way_partitions[7]),
2479 +		.extra1		= &way_partition_min,
2480 +		.extra2		= &way_partition_max,
2481 +	},	
2482 +	{
2483 +		.procname	= "Call_LC_way",
2484 +		.mode		= 0666,
2485 +		.proc_handler	= way_partition_handler,
2486 +		.data		= &way_partitions[8],
2487 +		.maxlen		= sizeof(way_partitions[8]),
2488 +		.extra1		= &way_partition_min,
2489 +		.extra2		= &way_partition_max,
2490 +	},		
2491 +	{
2492 +		.procname	= "lock_all",
2493 +		.mode		= 0666,
2494 +		.proc_handler	= lock_all_handler,
2495 +		.data		= &lock_all,
2496 +		.maxlen		= sizeof(lock_all),
2497 +		.extra1		= &zero,
2498 +		.extra2		= &one,
2499 +	},
2500 +	{
2501 +		.procname	= "l1_prefetch",
2502 +		.mode		= 0644,
2503 +		.proc_handler	= litmus_l1_prefetch_proc_handler,
2504 +		.data		= &l1_prefetch_proc,
2505 +		.maxlen		= sizeof(l1_prefetch_proc),
2506 +	},
2507 +	{
2508 +		.procname	= "l2_prefetch_hint",
2509 +		.mode		= 0644,
2510 +		.proc_handler	= litmus_l2_prefetch_hint_proc_handler,
2511 +		.data		= &l2_prefetch_hint_proc,
2512 +		.maxlen		= sizeof(l2_prefetch_hint_proc),
2513 +	},
2514 +	{
2515 +		.procname	= "l2_double_linefill",
2516 +		.mode		= 0644,
2517 +		.proc_handler	= litmus_l2_double_linefill_proc_handler,
2518 +		.data		= &l2_double_linefill_proc,
2519 +		.maxlen		= sizeof(l2_double_linefill_proc),
2520 +	},
2521 +	{
2522 +		.procname	= "l2_data_prefetch",
2523 +		.mode		= 0644,
2524 +		.proc_handler	= litmus_l2_data_prefetch_proc_handler,
2525 +		.data		= &l2_data_prefetch_proc,
2526 +		.maxlen		= sizeof(l2_data_prefetch_proc),
2527 +	},
2528 +	{
2529 +		.procname	= "os_isolation",
2530 +		.mode		= 0644,
2531 +		.proc_handler	= os_isolation_proc_handler,
2532 +		.data		= &os_isolation,
2533 +		.maxlen		= sizeof(os_isolation),
2534 +	},
2535 +	{
2536 +		.procname	= "use_part",
2537 +		.mode		= 0644,
2538 +		.proc_handler	= use_part_proc_handler,
2539 +		.data		= &use_part,
2540 +		.maxlen		= sizeof(use_part),
2541 +	},
2542 +	{
2543 +		.procname	= "do_perf_test",
2544 +		.mode		= 0644,
2545 +		.proc_handler	= do_perf_test_proc_handler,
2546 +	},
2547 +	{
2548 +		.procname	= "setup_flusher",
2549 +		.mode		= 0644,
2550 +		.proc_handler	= setup_flusher_proc_handler,
2551 +	},
2552 +	{
2553 +		.procname	= "lockdown_reg_0",
2554 +		.mode		= 0644,
2555 +		.proc_handler	= lockdown_reg_handler,
2556 +		.data		= &lockdown_reg[0],
2557 +		.maxlen		= sizeof(lockdown_reg[0]),
2558 +		.extra1		= &way_partition_min,
2559 +		.extra2		= &way_partition_max,
2560 +	},
2561 +	{
2562 +		.procname	= "lockdown_reg_1",
2563 +		.mode		= 0644,
2564 +		.proc_handler	= lockdown_reg_handler,
2565 +		.data		= &lockdown_reg[1],
2566 +		.maxlen		= sizeof(lockdown_reg[1]),
2567 +		.extra1		= &way_partition_min,
2568 +		.extra2		= &way_partition_max,
2569 +	},
2570 +	{
2571 +		.procname	= "lockdown_reg_2",
2572 +		.mode		= 0644,
2573 +		.proc_handler	= lockdown_reg_handler,
2574 +		.data		= &lockdown_reg[2],
2575 +		.maxlen		= sizeof(lockdown_reg[2]),
2576 +		.extra1		= &way_partition_min,
2577 +		.extra2		= &way_partition_max,
2578 +	},
2579 +	{
2580 +		.procname	= "lockdown_reg_3",
2581 +		.mode		= 0644,
2582 +		.proc_handler	= lockdown_reg_handler,
2583 +		.data		= &lockdown_reg[3],
2584 +		.maxlen		= sizeof(lockdown_reg[3]),
2585 +		.extra1		= &way_partition_min,
2586 +		.extra2		= &way_partition_max,
2587 +	},
2588 +	{
2589 +		.procname	= "lockdown_regs",
2590 +		.mode		= 0644,
2591 +		.proc_handler	= lockdown_global_handler,
2592 +		.data		= &lockdown_reg[8],
2593 +		.maxlen		= sizeof(lockdown_reg[8]),
2594 +		.extra1		= &way_partition_min,
2595 +		.extra2		= &way_partition_max,
2596 +	},
2597 +	{ }
2598 +};
2599 +
2600 +static struct ctl_table litmus_dir_table[] = {
2601 +	{
2602 +		.procname	= "litmus",
2603 +		.mode		= 0555,
2604 +		.child		= cache_table,
2605 +	},
2606 +	{ }
2607 +};
2608 +
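     +/*
     + * Read one word of every cache line in [start, end) with the lockdown
     + * register set to @lock_val. Note: @unlock_val is unused; the register
     + * is left at @lock_val, so callers must restore it themselves (see
     + * color_read_in_mem_lock() below for the self-restoring variant).
     + */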
2609 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
2610 +{
2611 +	u32 v = 0;
2612 +
2613 +	__asm__ __volatile__ (
2614 +"	.align 5\n"
2615 +"	str	%[lockval], [%[cachereg]]\n"
2616 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2617 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2618 +"	bgt	1b\n				@ read more, if necessary\n"
2619 +	: [addr] "+r" (start),
2620 +	  [val] "+r" (v)
2621 +	: [end] "r" (end),
2622 +#ifdef CONFIG_CACHE_L2X0
2623 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2624 +#else
2625 +	  [cachereg] "r" (lockreg_d),
2626 +#endif
2627 +	  [lockval] "r" (lock_val)
2628 +	: "cc");
2629 +
2630 +	return v;
2631 +}
2632 +
2633 +
2634 +/*
2635 + * Prefetch by reading the first word of each cache line in [start, end).
2636 + *
2637 + * @lock_val: lockdown value in effect while the lines are read
2638 + * @unlock_val: lockdown value the register is restored to afterwards
2639 + * @start: start address to be prefetched
2640 + * @end: end address to prefetch (exclusive)
2641 + *
2642 + * Assumes start < end. On L2X0 the local CPU's D-lockdown register is
2643 + * used; otherwise a global register is used under prefetch_lock.
2644 + */
2645 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
2646 +{
2647 +#ifndef CONFIG_CACHE_L2X0
2648 +	unsigned long flags;
2649 +#endif
2650 +	u32 v = 0;
2651 +
2652 +#ifndef CONFIG_CACHE_L2X0
2653 +	raw_spin_lock_irqsave(&prefetch_lock, flags);
2654 +#endif
2655 +
2656 +	__asm__ __volatile__ (
2657 +"	.align 5\n"
2658 +"	str	%[lockval], [%[cachereg]]\n"
2659 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2660 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2661 +"	bgt	1b\n				@ read more, if necessary\n"
2662 +"	str	%[unlockval], [%[cachereg]]\n"
2663 +	: [addr] "+r" (start),
2664 +	  [val] "+r" (v)
2665 +	: [end] "r" (end),
2666 +#ifdef CONFIG_CACHE_L2X0
2667 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2668 +#else
2669 +	  [cachereg] "r" (lockreg_d),
2670 +#endif
2671 +	  [lockval] "r" (lock_val),
2672 +	  [unlockval] "r" (unlock_val)
2673 +	: "cc");
2674 +
2675 +#ifndef CONFIG_CACHE_L2X0
2676 +	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
2677 +#endif
2678 +
2679 +	return v;
2680 +}
2681 +
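     +/* Return the elapsed time from @lhs to @rhs in nanoseconds. */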
2682 +static long update_timeval(struct timespec lhs, struct timespec rhs)
2683 +{
2684 +	long val;
2685 +	struct timespec ts;
2686 +
2687 +	ts = timespec_sub(rhs, lhs);
2688 +	val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
2689 +
2690 +	return val;
2691 +}
2692 +
2693 +extern void v7_flush_kern_dcache_area(void *, size_t);
2694 +extern void v7_flush_kern_cache_all(void);
2695 +/*
2696 + * Ensure that this page is not in the L1 or L2 cache.
2697 + * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
2698 + * kernel or user vaddr.
2699 + */
2700 +void color_flush_page(void *vaddr, size_t size)
2701 +{
2702 +	v7_flush_kern_dcache_area(vaddr, size);
2703 +	//v7_flush_kern_cache_all();
2704 +}
2705 +
2706 +extern struct page* get_colored_page(unsigned long color);
2707 +
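     +/*
     + * Build the MAX_NR_WAYS x MAX_NR_COLORS table of flusher pages used by
     + * flush_cache(). One page of every color is taken from bank 7, so a
     + * (way, color) entry can be read back to evict the matching L2 lines.
     + */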
2708 +int setup_flusher_array(void)
2709 +{
2710 +	int color, way, ret = 0;
2711 +	struct page *page;
2712 +
2713 +	if (flusher_pages != NULL)
2714 +		goto out;
2715 +
     +	/* zero the table so the error path can safely kfree() untouched slots */
2716 +	flusher_pages = (void***) kzalloc(MAX_NR_WAYS
2717 +			* sizeof(*flusher_pages), GFP_KERNEL);
2718 +	if (!flusher_pages) {
2719 +		printk(KERN_WARNING "No memory for flusher array!\n");
2720 +		ret = -ENOMEM;
2721 +		goto out;
2722 +	}
2723 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2724 +		void **flusher_color_arr;
2725 +		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
2726 +				* MAX_NR_COLORS, GFP_KERNEL);
2727 +		if (!flusher_color_arr) {
2728 +			printk(KERN_WARNING "No memory for flusher array!\n");
2729 +			ret = -ENOMEM;
2730 +			goto out_free;
2731 +		}
2732 +
2733 +		flusher_pages[way] = flusher_color_arr;
2734 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2735 +			int node;
2736 +			node = color + 112; /* populate from bank 7: 7 * MAX_NR_COLORS = 112 */
2737 +			page = get_colored_page(node);
2738 +			if (!page) {
2739 +				printk(KERN_WARNING "no more colored pages\n");
2740 +				ret = -EINVAL;
2741 +				goto out_free;
2742 +			}
2743 +			flusher_pages[way][color] = page_address(page);
2744 +			if (!flusher_pages[way][color]) {
2745 +				printk(KERN_WARNING "bad page address\n");
2746 +				ret = -EINVAL;
2747 +				goto out_free;
2748 +			}
2749 +		}
2750 +	}
2751 +
2752 +out:
2753 +	return ret;
2754 +out_free:
2755 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2756 +		/* not bothering to try and give back colored pages */
2759 +		kfree(flusher_pages[way]);
2760 +	}
2761 +	kfree(flusher_pages);
2762 +	flusher_pages = NULL;
2763 +	return ret;
2764 +}
2765 +
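     +/*
     + * Flush the L2 by reading flusher pages: for every way (all of them
     + * if @all, otherwise only the ways this CPU may allocate into) and
     + * every color, pull the matching page through the cache, evicting
     + * whatever resides there. The CPU's lockdown registers are saved and
     + * restored around the sweep.
     + */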
2766 +void flush_cache(int all)
2767 +{
2768 +	int way, color, cpu;
2769 +	unsigned long flags;
2770 +	
2771 +	raw_spin_lock_irqsave(&cache_lock, flags);
2772 +	cpu = raw_smp_processor_id();
2773 +	
2774 +	prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2775 +	prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2776 +	for (way=0;way<MAX_NR_WAYS;way++) {
2777 +		if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
2778 +			!all)
2779 +			continue;
2780 +		for (color=0;color<MAX_NR_COLORS;color++) {
2781 +			void *vaddr = flusher_pages[way][color];
2782 +			u32 lvalue  = unlocked_way[way];
2783 +			color_read_in_mem_lock(lvalue, LOCK_ALL,
2784 +					       vaddr, vaddr + PAGE_SIZE);
2785 +		}
2786 +
2787 +	}
2788 +
2789 +	writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
2790 +	writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
2791 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2792 +}
2793 +
2794 +/* src = shared, dst = local */
2795 +#if 1 /* random access pattern */
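     +/*
     + * sys_run_test: memory/cache access micro-benchmark (size in KB).
     + * The buffer is walked as a pointer chain with a random starting
     + * point, one hop per cache line, writing dst into src. type == 1
     + * preloads both src and dst into locked ways; any other type
     + * preloads only dst, so src misses to memory. The elapsed time is
     + * returned through @ts.
     + */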
2796 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2797 +{
2798 +	/* size is in KB */
2799 +	long ret = 0;
2800 +	lt_t t1, t2;
2801 +	int numlines = size * CACHELINES_IN_1KB;
2802 +	int next, sum = 0, ran;
2803 +	unsigned long flags;
2804 +	
2805 +	get_random_bytes(&ran, sizeof(int));
2806 +	next = ran % ((size*1024)/sizeof(cacheline_t));
2807 +	
2808 +	//preempt_disable();
2809 +	if (type == 1) {
2810 +		int i, j;
2811 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2812 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2813 +		
2814 +		local_irq_save(flags);
2815 +		t1 = litmus_clock();
2816 +		for (i = 0; i < numlines; i++) {
2817 +			next = src[next].line[0];
2818 +			for (j = 1; j < INTS_IN_CACHELINE; j++) {
2819 +				//dst[next].line[j] = src[next].line[j]; // read
2820 +				src[next].line[j] = dst[next].line[j]; // write
2821 +			}			
2822 +		}
2823 +		t2 = litmus_clock();
2824 +		local_irq_restore(flags);
2825 +		sum = next + (int)t2;
2826 +		t2 -= t1;
2827 +		ret = put_user(t2, ts);
2828 +	}
2829 +	else {
2830 +		int i, j;
2831 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2832 +		local_irq_save(flags);
2833 +		t1 = litmus_clock();
2834 +		for (i = 0; i < numlines; i++) {
2835 +			next = src[next].line[0];
2836 +			for (j = 1; j < INTS_IN_CACHELINE; j++) {
2837 +				//dst[next].line[j] = src[next].line[j]; //read
2838 +				src[next].line[j] = dst[next].line[j]; //write
2839 +			}			
2840 +		}
2841 +		t2 = litmus_clock();
2842 +		local_irq_restore(flags);
2843 +		sum = next + (int)t2;
2844 +		t2 -= t1;
2845 +		ret = put_user(t2, ts);
2846 +		v7_flush_kern_dcache_area(src, size*1024);
2847 +	}
2848 +	//preempt_enable();
2849 +	flush_cache(1);
2850 +
2851 +	return ret;
2852 +}
2853 +#else
2854 +/* sequential access pattern */
2855 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2856 +{
2857 +	/* size is in KB */
2858 +	long ret = 0;
2859 +	lt_t t1, t2;
2860 +	int numlines = size * CACHELINES_IN_1KB;
2861 +	int sum = 0;
2862 +	unsigned long flags;
2863 +	
2864 +	//preempt_disable();
2865 +	if (type == 1) {
2866 +		int i, j;
2867 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2868 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2869 +		
2870 +		local_irq_save(flags);
2871 +		t1 = litmus_clock();
2872 +		for (i = 0; i < numlines; i++) {
2873 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2874 +				//dst[i].line[j] = src[i].line[j]; // read
2875 +				src[i].line[j] = dst[i].line[j]; // write
2876 +			}			
2877 +		}
2878 +		t2 = litmus_clock();
2879 +		local_irq_restore(flags);
2880 +		sum = (int)(t1 + t2);
2881 +		t2 -= t1;
2882 +		ret = put_user(t2, ts);
2883 +	}
2884 +	else {
2885 +		int i, j;
2886 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2887 +		local_irq_save(flags);
2888 +		t1 = litmus_clock();
2889 +		for (i = 0; i < numlines; i++) {
2890 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2891 +				//dst[i].line[j] = src[i].line[j]; //read
2892 +				src[i].line[j] = dst[i].line[j]; //write
2893 +			}			
2894 +		}
2895 +		t2 = litmus_clock();
2896 +		local_irq_restore(flags);
2897 +		sum = (int)(t1 + t2);
2898 +		t2 -= t1;
2899 +		ret = put_user(t2, ts);
2900 +		v7_flush_kern_dcache_area(src, size*1024);
2901 +	}
2902 +	//preempt_enable();
2903 +	flush_cache(1);
2904 +
2905 +	return ret;
2906 +}
2907 +#endif
2908 +
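     +/*
     + * sys_lock_buffer: prefetch [vaddr, vaddr + size) with allocation
     + * limited to the ways in @lock_way, then leave the lockdown register
     + * so that only the ways in @unlock_way stay open. Both masks are
     + * inverted because the hardware encodes "1 = no allocation".
     + */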
2909 +asmlinkage long sys_lock_buffer(void *vaddr, size_t size, u32 lock_way, u32 unlock_way)
2910 +{
2911 +	/* size is in bytes */
2912 +	long ret = 0;
2914 +	u32 lock_val, unlock_val;
2915 +	
2916 +	lock_val = ~lock_way & 0x0000ffff;
2917 +	unlock_val = ~unlock_way & 0x0000ffff;
2918 +	color_read_in_mem_lock(lock_val, unlock_val, (void*)vaddr, (void*)vaddr + size);
2919 +	
2920 +	return ret;
2921 +}
2922 +
2923 +#define TRIALS 1000
2924 +
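     +/*
     + * Timing experiment behind the do_perf_test sysctl: on a 2^order-page
     + * buffer, average over TRIALS iterations (1) a bare flush, (2) reads
     + * that hit in the L2, (3) reads served from memory (flushing between
     + * reads and subtracting the flush cost), and (4) reads issued after
     + * writing lines held in a locked way. Results go to the kernel log.
     + */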
2925 +static int perf_test(void)
     +{
2926 +	struct timespec before, after;
2927 +	struct page *page;
2928 +	void *vaddr;
2929 +	u32 *data;
2930 +	long time, flush_time;
2931 +	int i, num_pages = 1;
2932 +	unsigned int order = 4;
2933 +
2934 +	for (i = 0; i < order; i++) {
2935 +		num_pages = num_pages*2;
2936 +	}
2937 +
2938 +	printk("Number of pages: %d\n", num_pages);
2939 +	//page = alloc_page(__GFP_MOVABLE);
2940 +	page = alloc_pages(__GFP_MOVABLE, order);
2941 +	if (!page) {
2942 +		printk(KERN_WARNING "No memory\n");
2943 +		return -ENOMEM;
2944 +	}
2945 +
2946 +	vaddr = page_address(page);
2947 +	if (!vaddr)
2948 +		printk(KERN_WARNING "%s: vaddr is null\n", __func__);
2949 +	data = (u32*) vaddr;
2950 +
2951 +	getnstimeofday(&before);
2952 +	barrier();
2953 +	for (i = 0; i < TRIALS; i++) {
2954 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2955 +	}
2956 +	barrier();
2957 +	getnstimeofday(&after);
2958 +	time = update_timeval(before, after);
2959 +	printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
2960 +	flush_time = time / TRIALS;
2961 +
2962 +	color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2963 +	
2964 +	barrier();
2965 +	getnstimeofday(&before);
2966 +	barrier();
2967 +	for (i = 0; i < TRIALS; i++) {
2968 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2969 +	}
2970 +	barrier();
2971 +	getnstimeofday(&after);
2972 +	time = update_timeval(before, after);
2973 +	printk("Average for read from cache: %ld\n", time / TRIALS);
2974 +
2975 +	getnstimeofday(&before);
2976 +	barrier();
2977 +	for (i = 0; i < TRIALS; i++) {
2978 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2979 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2980 +	}
2981 +	barrier();
2982 +	getnstimeofday(&after);
2983 +	time = update_timeval(before, after);
2984 +	printk("Average for read from mem: %ld (%ld)\n", time / TRIALS - flush_time, time / TRIALS);
2985 +
2986 +	// write in locked way
2987 +	color_read_in_mem_lock(nr_unlocked_way[2], LOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2988 +	for (i = 0; i < PAGE_SIZE*num_pages/sizeof(u32); i++) {
2989 +		data[i] = i % 63353;
2990 +	}
2991 +	// read
2992 +	barrier();
2993 +	getnstimeofday(&before);
2994 +	barrier();
2995 +	for (i = 0; i < TRIALS; i++) {
2996 +		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2997 +	}
2998 +	barrier();
2999 +	getnstimeofday(&after);
3000 +	time = update_timeval(before, after);
3001 +	printk("Average for read in after write: %ld\n", time / TRIALS);
3002 +	
3003 +	
3004 +	//free_page((unsigned long)vaddr);
3005 +	free_pages((unsigned long)vaddr, order);
3006 +
3007 +	return 0;
3008 +}
3009 +
3010 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
3011 +		void __user *buffer, size_t *lenp, loff_t *ppos)
3012 +{
3013 +	int ret = 0;
3014 +
3015 +	if (write) {
3016 +		ret = perf_test();
3017 +	}
3018 +
3019 +	return ret;
3020 +}
3021 +
3022 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
3023 +		void __user *buffer, size_t *lenp, loff_t *ppos)
3024 +{
3025 +	int ret = -EINVAL;
3026 +
3027 +	if (write && flusher_pages == NULL) {
3028 +		ret = setup_flusher_array();
3029 +		printk(KERN_INFO "setup flusher return: %d\n", ret);
3030 +	} else if (flusher_pages) {
3031 +		printk(KERN_INFO "flusher_pages is already set!\n");
3032 +		ret = 0;
3033 +	}
3034 +
3037 +	return ret;
3038 +}
3039 +