Attachment 'litmus-rt-rtas17.patch'

   1 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- a/arch/arm/boot/compressed/Makefile
   4 +++ b/arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
  14 index 0c462a9..5291b70 100644
  15 --- a/arch/arm/include/asm/unistd.h
  16 +++ b/arch/arm/include/asm/unistd.h
  17 @@ -19,7 +19,8 @@
  18   * This may need to be greater than __NR_last_syscall+1 in order to
  19   * account for the padding in the syscall table
  20   */
  21 -#define __NR_syscalls  (388 + NR_litmus_syscalls)
  22 +#define __NR_syscalls  (388 + NR_litmus_syscalls + 0)
  23 +
  24  
  25  /*
  26   * *NOTE*: This is a ghost syscall private to the kernel.  Only the
  27 diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
  28 index f4738a8..55dc863 100644
  29 --- a/arch/arm/kernel/calls.S
  30 +++ b/arch/arm/kernel/calls.S
  31 @@ -409,6 +409,14 @@
  32          CALL(sys_wait_for_ts_release)
  33  		CALL(sys_release_ts)
  34  		CALL(sys_null_call)
  35 +/* 400 */	CALL(sys_get_current_budget)
  36 +		CALL(sys_reservation_create)
  37 +		CALL(sys_reservation_destroy)
  38 +		CALL(sys_set_mc2_task_param)
  39 +		CALL(sys_set_page_color)
  40 +/* 405 */	CALL(sys_test_call)
  41 +		CALL(sys_run_test)
  42 +		CALL(sys_lock_buffer)
  43  
  44  #ifndef syscalls_counted
  45  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
  46 diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
  47 index 350f188..720b45e 100644
  48 --- a/arch/arm/kernel/irq.c
  49 +++ b/arch/arm/kernel/irq.c
  50 @@ -44,6 +44,8 @@
  51  #include <asm/mach/irq.h>
  52  #include <asm/mach/time.h>
  53  
  54 +#include <litmus/cache_proc.h>
  55 +
  56  unsigned long irq_err_count;
  57  
  58  int arch_show_interrupts(struct seq_file *p, int prec)
  59 @@ -66,7 +68,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  60   */
  61  void handle_IRQ(unsigned int irq, struct pt_regs *regs)
  62  {
  63 +	enter_irq_mode();
  64  	__handle_domain_irq(NULL, irq, false, regs);
  65 +	exit_irq_mode();
  66  }
  67  
  68  /*
  69 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
  70 index e309c8f..71c969a 100644
  71 --- a/arch/arm/mm/cache-l2x0.c
  72 +++ b/arch/arm/mm/cache-l2x0.c
  73 @@ -33,6 +33,8 @@
  74  #include "cache-tauros3.h"
  75  #include "cache-aurora-l2.h"
  76  
  77 +#include <litmus/cache_proc.h>
  78 +
  79  struct l2c_init_data {
  80  	const char *type;
  81  	unsigned way_size_0;
  82 @@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  83  
  84  	if (n) {
  85  		unsigned i;
  86 -
  87  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  88  		for (i = 0; i < n; i++)
  89  			pr_cont(" %s", errata[i]);
  90 @@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
  91  	},
  92  };
  93  
  94 +void l2c310_flush_all(void)
  95 +{
  96 +	l2c210_flush_all();
  97 +};
  98 +
  99  static int __init __l2c_init(const struct l2c_init_data *data,
 100  			     u32 aux_val, u32 aux_mask, u32 cache_id)
 101  {
 102 @@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 103  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
 104  		data->type, cache_id, aux);
 105  
 106 +	litmus_setup_lockdown(l2x0_base, cache_id);
 107 +	
 108  	return 0;
 109  }
 110  
 111 diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
 112 index 34680a5..b303a9b 100644
 113 --- a/arch/x86/syscalls/syscall_32.tbl
 114 +++ b/arch/x86/syscalls/syscall_32.tbl
 115 @@ -377,3 +377,11 @@
 116  368	i386	wait_for_ts_release	sys_wait_for_ts_release
 117  369	i386	release_ts		sys_release_ts
 118  370	i386	null_call		sys_null_call
 119 +371	i386	get_current_budget	sys_get_current_budget
 120 +372	i386	reservation_create	sys_reservation_create
 121 +373	i386	reservation_destroy	sys_reservation_destroy
 122 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
 123 +375	i386	set_page_color		sys_set_page_color
 124 +376	i386	test_call		sys_test_call
 125 +377	i386	run_test		sys_run_test
 126 +378	i386	lock_buffer		sys_lock_buffer
 127 diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
 128 index cbd1b6b..5f24a80 100644
 129 --- a/arch/x86/syscalls/syscall_64.tbl
 130 +++ b/arch/x86/syscalls/syscall_64.tbl
 131 @@ -342,6 +342,14 @@
 132  360	common	wait_for_ts_release	sys_wait_for_ts_release
 133  361	common	release_ts		sys_release_ts
 134  362	common	null_call		sys_null_call
 135 +363	common	get_current_budget	sys_get_current_budget
 136 +364	common	reservation_create	sys_reservation_create
 137 +365	common	reservation_destroy	sys_reservation_destroy
 138 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 139 +367	common	set_page_color		sys_set_page_color
 140 +368	common	test_call		sys_test_call
 141 +369	common	run_test		sys_run_test
 142 +370	common	lock_buffer		sys_lock_buffer
 143  
 144  #
 145  # x32-specific system call numbers start at 512 to avoid cache impact
 146 diff --git a/include/linux/migrate.h b/include/linux/migrate.h
 147 index cac1c09..b16047b 100644
 148 --- a/include/linux/migrate.h
 149 +++ b/include/linux/migrate.h
 150 @@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
 151  			struct page *, struct page *, enum migrate_mode);
 152  extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 153  		unsigned long private, enum migrate_mode mode, int reason);
 154 +extern int replicate_pages(struct list_head *l, new_page_t new, free_page_t free,
 155 +		unsigned long private, enum migrate_mode mode, int reason);
 156  
 157  extern int migrate_prep(void);
 158  extern int migrate_prep_local(void);
 159 @@ -50,7 +52,11 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 160  		free_page_t free, unsigned long private, enum migrate_mode mode,
 161  		int reason)
 162  	{ return -ENOSYS; }
 163 -
 164 +static inline int replicate_pages(struct list_head *l, new_page_t new,
 165 +		free_page_t free, unsigned long private, enum migrate_mode mode,
 166 +		int reason)
 167 +	{ return -ENOSYS; }
 168 +	
 169  static inline int migrate_prep(void) { return -ENOSYS; }
 170  static inline int migrate_prep_local(void) { return -ENOSYS; }
 171  
 172 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
 173 index c89c53a..7c90e02 100644
 174 --- a/include/linux/rmap.h
 175 +++ b/include/linux/rmap.h
 176 @@ -188,7 +188,8 @@ int page_referenced(struct page *, int is_locked,
 177  #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 178  
 179  int try_to_unmap(struct page *, enum ttu_flags flags);
 180 -
 181 +int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
 182 +		     unsigned long address, void *arg);
 183  /*
 184   * Used by uprobes to replace a userspace page safely
 185   */
 186 diff --git a/include/litmus/budget.h b/include/litmus/budget.h
 187 index bd2d5c9..60eb814 100644
 188 --- a/include/litmus/budget.h
 189 +++ b/include/litmus/budget.h
 190 @@ -33,4 +33,6 @@ static inline int requeue_preempted_job(struct task_struct* t)
 191  		(!budget_exhausted(t) || !budget_enforced(t));
 192  }
 193  
 194 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining);
 195 +
 196  #endif
 197 diff --git a/include/litmus/cache_proc.h b/include/litmus/cache_proc.h
 198 new file mode 100644
 199 index 0000000..e9440de
 200 --- /dev/null
 201 +++ b/include/litmus/cache_proc.h
 202 @@ -0,0 +1,17 @@
 203 +#ifndef LITMUS_CACHE_PROC_H
 204 +#define LITMUS_CACHE_PROC_H
 205 +
 206 +#ifdef __KERNEL__
 207 +
 208 +void litmus_setup_lockdown(void __iomem*, u32);
 209 +void enter_irq_mode(void);
 210 +void exit_irq_mode(void);
 211 +void flush_cache(int all);
 212 +void lock_cache(int cpu, u32 val);
 213 +
 214 +extern struct page *new_alloc_page_color(unsigned long color);
 215 +
 216 +#endif
 217 +
 218 +#endif
 219 +
 220 diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
 221 index a6eb534..441210c 100644
 222 --- a/include/litmus/litmus.h
 223 +++ b/include/litmus/litmus.h
 224 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 225  	((current)->state == TASK_RUNNING || 	\
 226  	 preempt_count() & PREEMPT_ACTIVE)
 227  
 228 +#define is_running(t) 			\
 229 +	((t)->state == TASK_RUNNING || 	\
 230 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 231 +
 232 +#define is_blocked(t)       \
 233 +	(!is_running(t))
 234 +
 235  #define is_released(t, now)	\
 236  	(lt_before_eq(get_release(t), now))
 237  #define is_tardy(t, now)    \
 238 diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
 239 new file mode 100644
 240 index 0000000..e3c0af2
 241 --- /dev/null
 242 +++ b/include/litmus/mc2_common.h
 243 @@ -0,0 +1,31 @@
 244 +/*
 245 + * MC^2 common data structures
 246 + */
 247 + 
 248 +#ifndef __UNC_MC2_COMMON_H__
 249 +#define __UNC_MC2_COMMON_H__
 250 +
 251 +enum crit_level {
 252 +	CRIT_LEVEL_A = 0,
 253 +	CRIT_LEVEL_B = 1,
 254 +	CRIT_LEVEL_C = 2,
 255 +	NUM_CRIT_LEVELS = 3,
 256 +};
 257 +
 258 +struct mc2_task {
 259 +	enum crit_level crit;
 260 +	unsigned int res_id;
 261 +};
 262 +
 263 +#ifdef __KERNEL__
 264 +
 265 +#include <litmus/reservation.h>
 266 +
 267 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
 268 +
 269 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 270 +							struct reservation *res);
 271 +	
 272 +#endif /* __KERNEL__ */
 273 +
 274 +#endif
 275 \ No newline at end of file
 276 diff --git a/include/litmus/polling_reservations.h b/include/litmus/polling_reservations.h
 277 new file mode 100644
 278 index 0000000..66c9b1e
 279 --- /dev/null
 280 +++ b/include/litmus/polling_reservations.h
 281 @@ -0,0 +1,36 @@
 282 +#ifndef LITMUS_POLLING_RESERVATIONS_H
 283 +#define LITMUS_POLLING_RESERVATIONS_H
 284 +
 285 +#include <litmus/reservation.h>
 286 +
 287 +struct polling_reservation {
 288 +	/* extend basic reservation */
 289 +	struct reservation res;
 290 +
 291 +	lt_t max_budget;
 292 +	lt_t period;
 293 +	lt_t deadline;
 294 +	lt_t offset;
 295 +};
 296 +
 297 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
 298 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
 299 +
 300 +struct table_driven_reservation {
 301 +	/* extend basic reservation */
 302 +	struct reservation res;
 303 +
 304 +	lt_t major_cycle;
 305 +	unsigned int next_interval;
 306 +	unsigned int num_intervals;
 307 +	struct lt_interval *intervals;
 308 +
 309 +	/* info about current scheduling slot */
 310 +	struct lt_interval cur_interval;
 311 +	lt_t major_cycle_start;
 312 +};
 313 +
 314 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
 315 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
 316 +
 317 +#endif
 318 diff --git a/include/litmus/replicate_lib.h b/include/litmus/replicate_lib.h
 319 new file mode 100644
 320 index 0000000..186837b
 321 --- /dev/null
 322 +++ b/include/litmus/replicate_lib.h
 323 @@ -0,0 +1,19 @@
 324 +#ifndef LITMUS_REPLICATE_LIB_H
 325 +#define LITMUS_REPLICATE_LIB_H
 326 +
 327 +#include <linux/list.h>
 328 +#include <linux/mm_types.h>
 329 +#include <linux/mm_inline.h>
 330 +
 331 +/* Data structure for the "master" list */
 332 +struct shared_lib_page {
 333 +	struct page *master_page;
 334 +	struct page *r_page[NR_CPUS+1];
 335 +	unsigned long int master_pfn;
 336 +	unsigned long int r_pfn[NR_CPUS+1];
 337 +	struct list_head list;
 338 +};
 339 +
 340 +extern struct list_head shared_lib_pages;
 341 +
 342 +#endif
 343 diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
 344 new file mode 100644
 345 index 0000000..7e022b3
 346 --- /dev/null
 347 +++ b/include/litmus/reservation.h
 348 @@ -0,0 +1,256 @@
 349 +#ifndef LITMUS_RESERVATION_H
 350 +#define LITMUS_RESERVATION_H
 351 +
 352 +#include <linux/list.h>
 353 +#include <linux/hrtimer.h>
 354 +
 355 +struct reservation_client;
 356 +struct reservation_environment;
 357 +struct reservation;
 358 +
 359 +typedef enum {
 360 +	/* reservation has no clients, is not consuming budget */
 361 +	RESERVATION_INACTIVE = 0,
 362 +
 363 +	/* reservation has clients, consumes budget when scheduled */
 364 +	RESERVATION_ACTIVE,
 365 +
 366 +	/* reservation has no clients, but may be consuming budget */
 367 +	RESERVATION_ACTIVE_IDLE,
 368 +
 369 +	/* Reservation has no budget and waits for
 370 +	 * replenishment. May or may not have clients. */
 371 +	RESERVATION_DEPLETED,
 372 +} reservation_state_t;
 373 +
 374 +
 375 +/* ************************************************************************** */
 376 +
 377 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
 378 + * to schedule right now and background work can be scheduled. */
 379 +typedef struct task_struct * (*dispatch_t)  (
 380 +	struct reservation_client *client
 381 +);
 382 +
 383 +/* Something that can be managed in a reservation and that can yield
 384 + * a process for dispatching. Contains a pointer to the reservation
 385 + * to which it "belongs". */
 386 +struct reservation_client {
 387 +	struct list_head list;
 388 +	struct reservation* reservation;
 389 +	dispatch_t dispatch;
 390 +};
 391 +
 392 +
 393 +/* ************************************************************************** */
 394 +
 395 +/* Called by reservations to request state change. */
 396 +typedef void (*reservation_change_state_t)  (
 397 +	struct reservation_environment* env,
 398 +	struct reservation *res,
 399 +	reservation_state_t new_state
 400 +);
 401 +
 402 +/* The framework within which reservations operate. */
 403 +struct reservation_environment {
 404 +	lt_t time_zero;
 405 +	lt_t current_time;
 406 +
 407 +	/* services invoked by reservations */
 408 +	reservation_change_state_t change_state;
 409 +};
 410 +
 411 +
 412 +/* ************************************************************************** */
 413 +
 414 +/* A new client is added or an existing client resumes. */
 415 +typedef void (*client_arrives_t)  (
 416 +	struct reservation *reservation,
 417 +	struct reservation_client *client
 418 +);
 419 +
 420 +/* A client suspends or terminates. */
 421 +typedef void (*client_departs_t)  (
 422 +	struct reservation *reservation,
 423 +	struct reservation_client *client,
 424 +	int did_signal_job_completion
 425 +);
 426 +
 427 +/* A previously requested replenishment has occurred. */
 428 +typedef void (*on_replenishment_timer_t)  (
 429 +	struct reservation *reservation
 430 +);
 431 +
 432 +/* Update the reservation's budget to reflect execution or idling. */
 433 +typedef void (*drain_budget_t) (
 434 +	struct reservation *reservation,
 435 +	lt_t how_much
 436 +);
 437 +
 438 +/* Select a ready task from one of the clients for scheduling. */
 439 +typedef struct task_struct* (*dispatch_client_t)  (
 440 +	struct reservation *reservation,
 441 +	lt_t *time_slice /* May be used to force rescheduling after
 442 +	                    some amount of time. 0 => no limit */
 443 +);
 444 +
 445 +
 446 +struct reservation_ops {
 447 +	dispatch_client_t dispatch_client;
 448 +
 449 +	client_arrives_t client_arrives;
 450 +	client_departs_t client_departs;
 451 +
 452 +	on_replenishment_timer_t replenish;
 453 +	drain_budget_t drain_budget;
 454 +};
 455 +
 456 +struct reservation {
 457 +	/* used to queue in environment */
 458 +	struct list_head list;
 459 +
 460 +	reservation_state_t state;
 461 +	unsigned int id;
 462 +
 463 +	/* exact meaning defined by impl. */
 464 +	lt_t priority;
 465 +	lt_t cur_budget;
 466 +	lt_t next_replenishment;
 467 +
 468 +	/* budget stats */
 469 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
 470 +	lt_t budget_consumed_total;
 471 +
 472 +	/* interaction with framework */
 473 +	struct reservation_environment *env;
 474 +	struct reservation_ops *ops;
 475 +
 476 +	struct list_head clients;
 477 +	
 478 +	/* for global env. */
 479 +	int scheduled_on;
 480 +	int event_added;
 481 +	/* for blocked by ghost. Do not charge budget when ACTIVE */
 482 +	int blocked_by_ghost;
 483 +	/* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */
 484 +	int is_ghost;
 485 +};
 486 +
 487 +void reservation_init(struct reservation *res);
 488 +
 489 +/* Default implementations */
 490 +
 491 +/* simply select the first client in the list, set *for_at_most to zero */
 492 +struct task_struct* default_dispatch_client(
 493 +	struct reservation *res,
 494 +	lt_t *for_at_most
 495 +);
 496 +
 497 +/* "connector" reservation client to hook up tasks with reservations */
 498 +struct task_client {
 499 +	struct reservation_client client;
 500 +	struct task_struct *task;
 501 +};
 502 +
 503 +void task_client_init(struct task_client *tc, struct task_struct *task,
 504 +	struct reservation *reservation);
 505 +
 506 +#define SUP_RESCHEDULE_NOW (0)
 507 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
 508 +
 509 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
 510 + * environment.
 511 + */
 512 +struct sup_reservation_environment {
 513 +	struct reservation_environment env;
 514 +
 515 +	/* ordered by priority */
 516 +	struct list_head active_reservations;
 517 +
 518 +	/* ordered by next_replenishment */
 519 +	struct list_head depleted_reservations;
 520 +
 521 +	/* unordered */
 522 +	struct list_head inactive_reservations;
 523 +
 524 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
 525 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
 526 +	 * any other value means program a timer for the given time
 527 +	 */
 528 +	lt_t next_scheduler_update;
 529 +	/* set to true if a call to sup_dispatch() is imminent */
 530 +	bool will_schedule;
 531 +};
 532 +
 533 +/* Contract:
 534 + *  - before calling into sup_ code, or any reservation methods,
 535 + *    update the time with sup_update_time(); and
 536 + *  - after calling into sup_ code, or any reservation methods,
 537 + *    check next_scheduler_update and program timer or trigger
 538 + *    scheduler invocation accordingly.
 539 + */
 540 +
 541 +void sup_init(struct sup_reservation_environment* sup_env);
 542 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
 543 +	struct reservation* new_res);
 544 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
 545 +	lt_t timeout);
 546 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
 547 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 548 +
 549 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 550 +	unsigned int id);
 551 +	
 552 +/* A global multiprocessor reservation environment. */
 553 +
 554 +typedef enum {
 555 +	EVENT_REPLENISH = 0,
 556 +	EVENT_DRAIN,
 557 +	EVENT_OTHERS,
 558 +} event_type_t;
 559 +
 560 +
 561 +struct next_timer_event {
 562 +	lt_t next_update;
 563 +	int timer_armed_on;
 564 +	unsigned int id;
 565 +	event_type_t type;
 566 +	struct list_head list;
 567 +};
 568 +
 569 +struct gmp_reservation_environment {
 570 +	raw_spinlock_t lock;
 571 +	struct reservation_environment env;
 572 +
 573 +	/* ordered by priority */
 574 +	struct list_head active_reservations;
 575 +
 576 +	/* ordered by next_replenishment */
 577 +	struct list_head depleted_reservations;
 578 +
 579 +	/* unordered */
 580 +	struct list_head inactive_reservations;
 581 +
 582 +	/* timer event ordered by next_update */
 583 +	struct list_head next_events;
 584 +	
 585 +	/* (schedule_now == true) means call gmp_dispatch() now */
 586 +	int schedule_now;
 587 +	/* set to true if a call to gmp_dispatch() is imminent */
 588 +	bool will_schedule;
 589 +};
 590 +
 591 +void gmp_init(struct gmp_reservation_environment* gmp_env);
 592 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
 593 +	struct reservation* new_res);
 594 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
 595 +	lt_t timeout, unsigned int id, event_type_t type);
 596 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
 597 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
 598 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
 599 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
 600 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
 601 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
 602 +	unsigned int id);
 603 +
 604 +#endif
 605 diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
 606 index 7b9a909..56de045 100644
 607 --- a/include/litmus/rt_param.h
 608 +++ b/include/litmus/rt_param.h
 609 @@ -51,6 +51,16 @@ typedef enum {
 610  	TASK_EARLY
 611  } release_policy_t;
 612  
 613 +#ifdef CONFIG_PGMRT_SUPPORT
 614 +typedef enum {
 615 +    PGM_NOT_A_NODE,
 616 +    PGM_SRC,
 617 +    PGM_SINK,
 618 +    PGM_SRC_SINK,
 619 +    PGM_INTERNAL
 620 +} pgm_node_type_t;
 621 +#endif
 622 +
 623  /* We use the common priority interpretation "lower index == higher priority",
 624   * which is commonly used in fixed-priority schedulability analysis papers.
 625   * So, a numerically lower priority value implies higher scheduling priority,
 626 @@ -62,6 +72,7 @@ typedef enum {
 627  #define LITMUS_MAX_PRIORITY     512
 628  #define LITMUS_HIGHEST_PRIORITY   1
 629  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
 630 +#define LITMUS_NO_PRIORITY		UINT_MAX
 631  
 632  /* Provide generic comparison macros for userspace,
 633   * in case that we change this later. */
 634 @@ -71,6 +82,46 @@ typedef enum {
 635  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
 636  	 (p) <= LITMUS_LOWEST_PRIORITY)
 637  
 638 +/* reservation support */
 639 +
 640 +typedef enum {
 641 +	PERIODIC_POLLING,
 642 +	SPORADIC_POLLING,
 643 +	TABLE_DRIVEN,
 644 +} reservation_type_t;
 645 +
 646 +struct lt_interval {
 647 +	lt_t start;
 648 +	lt_t end;
 649 +};
 650 +
 651 +#ifndef __KERNEL__
 652 +#define __user
 653 +#endif
 654 +
 655 +struct reservation_config {
 656 +	unsigned int id;
 657 +	lt_t priority;
 658 +	int  cpu;
 659 +
 660 +	union {
 661 +		struct {
 662 +			lt_t period;
 663 +			lt_t budget;
 664 +			lt_t relative_deadline;
 665 +			lt_t offset;
 666 +		} polling_params;
 667 +
 668 +		struct {
 669 +			lt_t major_cycle_length;
 670 +			unsigned int num_intervals;
 671 +			struct lt_interval __user *intervals;
 672 +		} table_driven_params;
 673 +	};
 674 +};
 675 +
 676 +/* regular sporadic task support */
 677 +
 678  struct rt_task {
 679  	lt_t 		exec_cost;
 680  	lt_t 		period;
 681 @@ -81,6 +132,10 @@ struct rt_task {
 682  	task_class_t	cls;
 683  	budget_policy_t  budget_policy;  /* ignored by pfair */
 684  	release_policy_t release_policy;
 685 +#ifdef CONFIG_PGMRT_SUPPORT
 686 +	pgm_node_type_t	pgm_type;
 687 +	lt_t			pgm_expected_etoe;
 688 +#endif
 689  };
 690  
 691  union np_flag {
 692 @@ -121,6 +176,13 @@ struct control_page {
 693  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
 694  				     * started. */
 695  
 696 +#ifdef CONFIG_PGMRT_SUPPORT
 697 +    /* Flags from userspace signifying PGM wait states. */
 698 +    volatile uint32_t   pgm_waiting;    /* waiting for tokens */
 699 +    volatile uint32_t   pgm_sending;    /* sending tokens */
 700 +    volatile uint32_t   pgm_satisfied;  /* done waiting/sending */
 701 +#endif
 702 +
 703  	/* to be extended */
 704  };
 705  
 706 @@ -165,6 +227,7 @@ struct rt_job {
 707  };
 708  
 709  struct pfair_param;
 710 +struct mc2_task;
 711  
 712  /*	RT task parameters for scheduling extensions
 713   *	These parameters are inherited during clone and therefore must
 714 @@ -246,7 +309,10 @@ struct rt_param {
 715  	volatile int		linked_on;
 716  
 717  	/* PFAIR/PD^2 state. Allocated on demand. */
 718 -	struct pfair_param*	pfair;
 719 +	union {
 720 +		void *plugin_state;
 721 +		struct pfair_param *pfair;
 722 +	};
 723  
 724  	/* Fields saved before BE->RT transition.
 725  	 */
 726 @@ -275,6 +341,10 @@ struct rt_param {
 727  
 728  	/* Pointer to the page shared between userspace and kernel. */
 729  	struct control_page * ctrl_page;
 730 +
 731 +	/* Mixed-criticality specific data */
 732 +	struct mc2_task* mc2_data;
 733 +	unsigned long addr_ctrl_page;
 734  };
 735  
 736  #endif
 737 diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
 738 index 0ccccd6..4c8aaa6 100644
 739 --- a/include/litmus/sched_plugin.h
 740 +++ b/include/litmus/sched_plugin.h
 741 @@ -77,6 +77,17 @@ typedef long (*wait_for_release_at_t)(lt_t release_time);
 742  /* Informs the plugin when a synchronous release takes place. */
 743  typedef void (*synchronous_release_at_t)(lt_t time_zero);
 744  
 745 +/* How much budget has the current task consumed so far, and how much
 746 + * has it left? The default implementation ties into the per-task
 747 + * budget enforcement code. Plugins can override this to report
 748 + * reservation-specific values. */
 749 +typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
 750 +
 751 +/* Reservation creation/removal backends. Meaning of reservation_type and
 752 + * reservation_id are entirely plugin-specific. */
 753 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
 754 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
 755 +
 756  /************************ misc routines ***********************/
 757  
 758  
 759 @@ -109,6 +120,12 @@ struct sched_plugin {
 760  	task_exit_t 		task_exit;
 761  	task_cleanup_t		task_cleanup;
 762  
 763 +	current_budget_t	current_budget;
 764 +
 765 +	/* Reservation support */
 766 +	reservation_create_t	reservation_create;
 767 +	reservation_destroy_t	reservation_destroy;
 768 +
 769  #ifdef CONFIG_LITMUS_LOCKING
 770  	/*	locking protocols	*/
 771  	allocate_lock_t		allocate_lock;
 772 diff --git a/include/litmus/trace.h b/include/litmus/trace.h
 773 index 6017872..24ca412 100644
 774 --- a/include/litmus/trace.h
 775 +++ b/include/litmus/trace.h
 776 @@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 777  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
 778  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
 779  
 780 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
 781 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
 782 +
 783  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
 784  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
 785  
 786 @@ -137,6 +140,17 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 787  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
 788  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
 789  
 790 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
 791 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
 792 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
 793 +
 794 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
 795 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
 796 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
 797 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
 798 +
 799 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
 800 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
 801 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
 802 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
 803  
 804  #endif /* !_SYS_TRACE_H_ */
 805 diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
 806 index 94264c2..86bbbb8d 100644
 807 --- a/include/litmus/unistd_32.h
 808 +++ b/include/litmus/unistd_32.h
 809 @@ -17,5 +17,13 @@
 810  #define __NR_wait_for_ts_release __LSC(9)
 811  #define __NR_release_ts		__LSC(10)
 812  #define __NR_null_call		__LSC(11)
 813 +#define __NR_get_current_budget __LSC(12)
 814 +#define __NR_reservation_create	__LSC(13)
 815 +#define __NR_reservation_destroy	__LSC(14)
 816 +#define __NR_set_mc2_task_param	__LSC(15)
 817 +#define __NR_set_page_color		__LSC(16)
 818 +#define __NR_test_call		__LSC(17)
 819 +#define __NR_run_test		__LSC(18)
 820 +#define __NR_lock_buffer	__LSC(19)
 821  
 822 -#define NR_litmus_syscalls 12
 823 +#define NR_litmus_syscalls	20
 824 diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
 825 index d5ced0d..4b96e7c 100644
 826 --- a/include/litmus/unistd_64.h
 827 +++ b/include/litmus/unistd_64.h
 828 @@ -29,5 +29,22 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 829  __SYSCALL(__NR_release_ts, sys_release_ts)
 830  #define __NR_null_call				__LSC(11)
 831  __SYSCALL(__NR_null_call, sys_null_call)
 832 +#define __NR_get_current_budget			__LSC(12)
 833 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
 834 +#define __NR_reservation_create		__LSC(13)
 835 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
 836 +#define __NR_reservation_destroy	__LSC(14)
 837 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
 838 +#define __NR_set_mc2_task_param		__LSC(15)
 839 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
 840 +#define __NR_set_page_color			__LSC(16)
 841 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
 842 +#define __NR_test_call				__LSC(17)
 843 +__SYSCALL(__NR_test_call, sys_test_call)
 844 +#define __NR_run_test				__LSC(18)
 845 +__SYSCALL(__NR_run_test, sys_run_test)
 846 +#define __NR_lock_buffer			__LSC(19)
 847 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
 848  
 849 -#define NR_litmus_syscalls 12
 850 +
 851 +#define NR_litmus_syscalls 20
 852 diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
 853 index 9d58690..cd36358 100644
 854 --- a/kernel/sched/litmus.c
 855 +++ b/kernel/sched/litmus.c
 856 @@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
 857  	/* task counter */
 858  	p->se.sum_exec_runtime += delta;
 859  	if (delta) {
 860 -		TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
 861 -			delta, p->rt_param.job_params.exec_time, budget_remaining(p));
 862 +		//TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
 863 +			//delta, p->rt_param.job_params.exec_time, budget_remaining(p));
 864 +		;
 865  	}
 866  	/* sched_clock() */
 867  	p->se.exec_start = rq->clock;
 868 diff --git a/litmus/Makefile b/litmus/Makefile
 869 index 7970cd5..7e4711c 100644
 870 --- a/litmus/Makefile
 871 +++ b/litmus/Makefile
 872 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
 873  	    sync.o \
 874  	    rt_domain.o \
 875  	    edf_common.o \
 876 +		mc2_common.o \
 877  	    fp_common.o \
 878  	    fdso.o \
 879  	    locking.o \
 880 @@ -19,13 +20,19 @@ obj-y     = sched_plugin.o litmus.o \
 881  	    binheap.o \
 882  	    ctrldev.o \
 883  	    uncachedev.o \
 884 +		reservation.o \
 885 +		polling_reservations.o \
 886  	    sched_gsn_edf.o \
 887  	    sched_psn_edf.o \
 888 -	    sched_pfp.o
 889 +	    sched_pfp.o \
 890 +		sched_mc2.o \
 891 +		bank_proc.o \
 892 +	    color_shm.o \
 893 +		replicate_lib.o \
 894 +		cache_proc.o
 895  
 896  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 897  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 898 -
 899  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 900  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 901  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
 902 diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
 903 new file mode 100644
 904 index 0000000..2284f4f
 905 --- /dev/null
 906 +++ b/litmus/bank_proc.c
 907 @@ -0,0 +1,741 @@
 908 +/*
 909 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
 910 + *                This file keeps a pool of colored pages. Users can request pages with a
 911 + *                specific color or bank number.
 912 + *                Part of the code is adapted from Jonathan Herman's code.
 913 + */
 914 +#include <linux/init.h>
 915 +#include <linux/types.h>
 916 +#include <linux/kernel.h>
 917 +#include <linux/module.h>
 918 +#include <linux/sysctl.h>
 919 +#include <linux/slab.h>
 920 +#include <linux/io.h>
 921 +#include <linux/mutex.h>
 922 +#include <linux/mm.h>
 923 +#include <linux/random.h>
 924 +
 925 +#include <litmus/litmus_proc.h>
 926 +#include <litmus/sched_trace.h>
 927 +#include <litmus/litmus.h>
 928 +
 929 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
 930 +
 931 +// This address decoding is used on the i.MX6 SABRE-SD platform
 932 +#define BANK_MASK  0x38000000     
 933 +#define BANK_SHIFT  27
 934 +
 935 +#define CACHE_MASK  0x0000f000      
 936 +#define CACHE_SHIFT 12
 937 +
 938 +#define PAGES_PER_COLOR 2000
 939 +#define PAGES_PER_COLOR_HALF 1000
 940 +unsigned int NUM_PAGE_LIST;  //8*16
 941 +
 942 +unsigned int number_banks;
 943 +unsigned int number_cachecolors;
 944 +
 945 +unsigned int set_partition_max = 0x0000ffff;
 946 +unsigned int set_partition_min = 0;
 947 +unsigned int bank_partition_max = 0x000000ff;
 948 +unsigned int bank_partition_min = 0;
 949 +
 950 +int show_page_pool = 0;
 951 +int refill_page_pool = 0;
 952 +spinlock_t reclaim_lock;
 953 +
 954 +unsigned int set_partition[9] = {
 955 +        0x00000003,  /* Core 0, and Level A*/
 956 +        0x00000003,  /* Core 0, and Level B*/
 957 +        0x0000000C,  /* Core 1, and Level A*/
 958 +        0x0000000C,  /* Core 1, and Level B*/
 959 +        0x00000030,  /* Core 2, and Level A*/
 960 +        0x00000030,  /* Core 2, and Level B*/
 961 +        0x000000C0,  /* Core 3, and Level A*/
 962 +        0x000000C0,  /* Core 3, and Level B*/
 963 +        0x0000ff00,  /* Level C */
 964 +};
 965 +
 966 +unsigned int bank_partition[9] = {
 967 +        0x00000010,  /* Core 0, and Level A*/
 968 +        0x00000010,  /* Core 0, and Level B*/
 969 +        0x00000020,  /* Core 1, and Level A*/
 970 +        0x00000020,  /* Core 1, and Level B*/
 971 +        0x00000040,  /* Core 2, and Level A*/
 972 +        0x00000040,  /* Core 2, and Level B*/
 973 +        0x00000080,  /* Core 3, and Level A*/
 974 +        0x00000080,  /* Core 3, and Level B*/
 975 +        0x0000000c,  /* Level C */
 976 +};
 977 +
 978 +unsigned int set_index[9] = {
 979 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 980 +};
 981 +
 982 +unsigned int bank_index[9] = {
 983 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 984 +};
 985 +
 986 +struct mutex void_lockdown_proc;
 987 +
 988 +
 989 +/*
 990 + * Every page list contains a lock, a list, and a counter recording how many pages it stores
 991 + */ 
 992 +struct color_group {
 993 +	spinlock_t lock;
 994 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
 995 +	struct list_head list;
 996 +	atomic_t nr_pages;
 997 +};
 998 +
 999 +
1000 +static struct color_group *color_groups;
1001 +
1002 +/*
1003 + * Naive function to count the number of 1's
1004 + */
1005 +unsigned int counting_one_set(unsigned int v)
1006 +{
1007 +//    unsigned int v; // count the number of bits set in v
1008 +    unsigned int c; // c accumulates the total bits set in v
1009 +
1010 +    for (c = 0; v; v >>= 1)
1011 +    {
1012 +        c += v & 1;
1013 +    }
1014 +    return c;
1015 +}
1016 +
1017 +unsigned int two_exp(unsigned int e)
1018 +{
1019 +    unsigned int v = 1;
1020 +    for (; e>0; e-- )
1021 +    {
1022 +        v=v*2;
1023 +    }
1024 +    return v;
1025 +}
1026 +
1027 +unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
1028 +{
1029 +    unsigned int pos = 0;
1030 +
1031 +    while(true)
1032 +    {
1033 +        if(index ==0 && (bitmask & 1)==1)
1034 +        {
1035 +            break;
1036 +        }
1037 +        if(index !=0 && (bitmask & 1)==1){
1038 +            index--;
1039 +        }
1040 +        pos++;
1041 +        bitmask = bitmask >>1;
1042 +
1043 +    }
1044 +    return pos;
1045 +}
1046 +
1047 +
1048 +
1049 +/* Decoding page color, 0~15 */ 
1050 +static inline unsigned int page_color(struct page *page)
1051 +{
1052 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
1053 +}
1054 +
1055 +/* Decoding page bank number, 0~7 */ 
1056 +static inline unsigned int page_bank(struct page *page)
1057 +{
1058 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
1059 +}
1060 +
1061 +static inline unsigned int page_list_index(struct page *page)
1062 +{
1063 +    unsigned int idx;  
1064 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
1065 +//    printk("address = %lx, ", page_to_phys(page));
1066 +//    printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx);
1067 +
1068 +    return idx; 
1069 +}
1070 +
1071 +
1072 +
1073 +/*
1074 + * Determine the smallest page count among the page lists.
1075 + */
1076 +static unsigned long smallest_nr_pages(void)
1077 +{
1078 +	unsigned long i, min_pages;
1079 +	struct color_group *cgroup;
1080 +	cgroup = &color_groups[16*2];
1081 +	min_pages =atomic_read(&cgroup->nr_pages); 
1082 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
1083 +		cgroup = &color_groups[i];
1084 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
1085 +			min_pages = atomic_read(&cgroup->nr_pages);
1086 +	}
1087 +	return min_pages;
1088 +}
1089 +
1090 +static void show_nr_pages(void)
1091 +{
1092 +	unsigned long i;
1093 +	struct color_group *cgroup;
1094 +	printk("show nr pages***************************************\n");
1095 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
1096 +		cgroup = &color_groups[i];
1097 +		printk("(%03d) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
1098 +		if((i % 8) ==7){
1099 +		    printk("\n");
1100 +                }
1101 +	}
1102 +}
1103 +
1104 +/*
1105 + * Add a page to current pool.
1106 + */
1107 +void add_page_to_color_list(struct page *page)
1108 +{
1109 +	const unsigned long color = page_list_index(page);
1110 +	struct color_group *cgroup = &color_groups[color];
1111 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
1112 +	BUG_ON(page_count(page) > 1);
1113 +	spin_lock(&cgroup->lock);
1114 +	list_add_tail(&page->lru, &cgroup->list);
1115 +	atomic_inc(&cgroup->nr_pages);
1116 +	SetPageLRU(page);
1117 +	spin_unlock(&cgroup->lock);
1118 +}
1119 +
1120 +/*
1121 + * Replenish the page pool. 
1122 + * If a newly allocated page is one we want, it is pushed onto the correct page list;
1123 + * otherwise, it is freed.
1124 + */
1125 +static int do_add_pages(void)
1126 +{
1127 +	//printk("LITMUS do add pages\n");
1128 +	
1129 +	struct page *page, *page_tmp;
1130 +	LIST_HEAD(free_later);
1131 +	unsigned long color;
1132 +	int ret = 0;
1133 +	int i = 0;
1134 +	int free_counter = 0;
1135 +	unsigned long counter[128]= {0}; 
1136 +        
1137 +        //printk("Before refill : \n");
1138 +        //show_nr_pages();
1139 +
1140 +	// until all the page lists contain enough pages 
1141 +	//for (i =0; i<5; i++) {
1142 +	for (i=0; i< 1024*100;i++) {
1143 +	//while (smallest_nr_pages() < PAGES_PER_COLOR) {
1144 +       //         printk("smallest = %d\n", smallest_nr_pages());	
1145 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
1146 +	    //    page = alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
1147 +	
1148 +		if (unlikely(!page)) {
1149 +			printk(KERN_WARNING "Could not allocate pages.\n");
1150 +			ret = -ENOMEM;
1151 +			goto out;
1152 +		}
1153 +		color = page_list_index(page);
1154 +		counter[color]++;
1155 +	//	printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1156 +                //show_nr_pages();
1157 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
1158 +		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
1159 +		//if ( PAGES_PER_COLOR && color>=16*2) {
1160 +			add_page_to_color_list(page);
1161 +	//		printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
1162 +		} else{
1163 +			// Pages here will be freed later 
1164 +			list_add_tail(&page->lru, &free_later);
1165 +			free_counter++;
1166 +		        //list_del(&page->lru);
1167 +		//        __free_page(page);
1168 +	//		printk("useless page(%d) = color %x, bank %x\n", color,  page_color(page), page_bank(page));
1169 +		}
1170 +               //show_nr_pages();
1171 +                /*
1172 +                if(free_counter >= PAGES_PER_COLOR)
1173 +                {
1174 +                    printk("free unwanted page list eariler");
1175 +                    free_counter = 0;
1176 +	            list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1177 +		        list_del(&page->lru);
1178 +		        __free_page(page);
1179 +	            }
1180 +
1181 +                    show_nr_pages();
1182 +                }
1183 +                */
1184 +        }
1185 +/*        printk("page counter = \n");
1186 +        for (i=0; i<128; i++)
1187 +        {
1188 +            printk("(%03d) = %4d, ", i , counter[i]);
1189 +            if(i%8 == 7){
1190 +                printk("\n");
1191 +            }
1192 +
1193 +        }
1194 +*/	
1195 +        //printk("After refill : \n");
1196 +        //show_nr_pages();
1197 +#if 1
1198 +	// Free the unwanted pages
1199 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1200 +		list_del(&page->lru);
1201 +		__free_page(page);
1202 +	}
1203 +#endif
1204 +out:
1205 +        return ret;
1206 +}
1207 +
1208 +/*
1209 + * Provide pages for replacement according to cache color.
1210 + * This should be the only implementation here.
1211 + * This function should not be accessed by others directly.
1212 + * 
1213 + */ 
1214 +static struct  page *new_alloc_page_color( unsigned long color, int do_refill)
1215 +{
1216 +//	printk("allocate new page color = %d\n", color);	
1217 +	struct color_group *cgroup;
1218 +	struct page *rPage = NULL;
1219 +		
1220 +	if( (color <0) || (color)>(number_cachecolors*number_banks -1)) {
1221 +		TRACE_CUR("Wrong color %lu\n", color);	
1222 +//		printk(KERN_WARNING "Wrong color %lu\n", color);
1223 +		goto out;
1224 +	}
1225 +
1226 +		
1227 +	cgroup = &color_groups[color];
1228 +	spin_lock(&cgroup->lock);
1229 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
1230 +		TRACE_CUR("No free %lu colored pages.\n", color);
1231 +//		printk(KERN_WARNING "no free %lu colored pages.\n", color);
1232 +		goto out_unlock;
1233 +	}
1234 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
1235 +	BUG_ON(page_count(rPage) > 1);
1236 +	//get_page(rPage);
1237 +	list_del(&rPage->lru);
1238 +	atomic_dec(&cgroup->nr_pages);
1239 +	ClearPageLRU(rPage);
1240 +out_unlock:
1241 +	spin_unlock(&cgroup->lock);
1242 +out:
1243 +	if( smallest_nr_pages() == 0 && do_refill == 1)
1244 +        {
1245 +		do_add_pages();
1246 +       //     printk("ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");        
1247 +        
1248 +        }
1249 +		
1250 +	return rPage;
1251 +}
1252 +
1253 +struct page* get_colored_page(unsigned long color)
1254 +{
1255 +	return new_alloc_page_color(color, 1);
1256 +}
1257 +
1258 +/*
1259 + * provide pages for replacement according to  
1260 + * node = 0 for Level A tasks in Cpu 0
1261 + * node = 1 for Level B tasks in Cpu 0
1262 + * node = 2 for Level A tasks in Cpu 1
1263 + * node = 3 for Level B tasks in Cpu 1
1264 + * node = 4 for Level A tasks in Cpu 2
1265 + * node = 5 for Level B tasks in Cpu 2
1266 + * node = 6 for Level A tasks in Cpu 3
1267 + * node = 7 for Level B tasks in Cpu 3
1268 + * node = 8 for Level C tasks 
1269 + */
1270 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
1271 +{
1272 +//	printk("allocate new page node = %d\n", node);	
1273 +//	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1274 +	struct color_group *cgroup;
1275 +	struct page *rPage = NULL;
1276 +	unsigned int color;
1277 +	
1278 +
1279 +    unsigned int idx = 0;
1280 +	do {
1281 +        idx += num_by_bitmask_index(set_partition[node], set_index[node]);
1282 +        idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
1283 +		rPage =  new_alloc_page_color(idx, 0);
1284 +	} while (rPage == NULL);
1285 +        
1286 +            
1287 +        set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
1288 +        bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
1289 +	return rPage; 
1290 +}
1291 +
1292 +
1293 +/*
1294 + * Reclaim pages.
1295 + */
1296 +void reclaim_page(struct page *page)
1297 +{
1298 +	const unsigned long color = page_list_index(page);
1299 +	unsigned long nr_reclaimed = 0;
1300 +	spin_lock(&reclaim_lock);
1301 +    	put_page(page);
1302 +	add_page_to_color_list(page);
1303 +
1304 +	spin_unlock(&reclaim_lock);
1305 +	printk("Reclaimed page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1306 +}
1307 +
1308 +
1309 +/*
1310 + * Initialize the numbers of banks and cache colors 
1311 + */ 
1312 +static int __init init_variables(void)
1313 +{
1314 +	number_banks = counting_one_set(BANK_MASK); 
1315 +	number_banks = two_exp(number_banks); 
1316 +
1317 +	number_cachecolors = counting_one_set(CACHE_MASK);
1318 +	number_cachecolors = two_exp(number_cachecolors);
1319 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
1320 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
1321 +	mutex_init(&void_lockdown_proc);
1322 +	spin_lock_init(&reclaim_lock);
1323 +
1324 +}
1325 +
1326 +
1327 +/*
1328 + * Initialize the page pool 
1329 + */
1330 +static int __init init_color_groups(void)
1331 +{
1332 +	struct color_group *cgroup;
1333 +	unsigned long i;
1334 +	int err = 0;
1335 +
1336 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
1337 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
1338 +
1339 +	if (!color_groups) {
1340 +		printk(KERN_WARNING "Could not allocate color groups.\n");
1341 +		err = -ENOMEM;
1342 +	}else{
1343 +
1344 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
1345 +			cgroup = &color_groups[i];
1346 +			atomic_set(&cgroup->nr_pages, 0);
1347 +			INIT_LIST_HEAD(&cgroup->list);
1348 +			spin_lock_init(&cgroup->lock);
1349 +		}
1350 +	}
1351 +        return err;
1352 +}
1353 +
1354 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1355 +		size_t *lenp, loff_t *ppos)
1356 +{
1357 +	int ret = 0, i = 0;
1358 +	mutex_lock(&void_lockdown_proc);
1359 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1360 +	if (ret)
1361 +		goto out;
1362 +	if (write) {
1363 +            printk("New set Partition : \n");
1364 +	    for(i =0;i <9;i++)
1365 +            {
1366 +                set_index[i] = 0;
1367 +                printk("set[%d] = %x \n", i, set_partition[i]);
1368 +            }
1369 +	}
1370 +out:
1371 +	mutex_unlock(&void_lockdown_proc);
1372 +	return ret;
1373 +}
1374 +
1375 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1376 +		size_t *lenp, loff_t *ppos)
1377 +{
1378 +	int ret = 0, i = 0;
1379 +	mutex_lock(&void_lockdown_proc);
1380 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1381 +	if (ret)
1382 +		goto out;
1383 +	if (write) {
1384 +	    for(i =0;i <9;i++)
1385 +            {
1386 +                bank_index[i] = 0;
1387 +            }
1388 +	}
1389 +out:
1390 +	mutex_unlock(&void_lockdown_proc);
1391 +	return ret;
1392 +}
1393 +
1394 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1395 +		size_t *lenp, loff_t *ppos)
1396 +{
1397 +	int ret = 0, i = 0;
1398 +	mutex_lock(&void_lockdown_proc);
1399 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1400 +	if (ret)
1401 +		goto out;
1402 +	if (write) {
1403 +            show_nr_pages();
1404 +	}
1405 +out:
1406 +	mutex_unlock(&void_lockdown_proc);
1407 +	return ret;
1408 +}
1409 +
1410 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1411 +		size_t *lenp, loff_t *ppos)
1412 +{
1413 +	int ret = 0, i = 0;
1414 +	mutex_lock(&void_lockdown_proc);
1415 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1416 +	if (ret)
1417 +		goto out;
1418 +	if (write) {
1419 +            do_add_pages();
1420 +	}
1421 +out:
1422 +	mutex_unlock(&void_lockdown_proc);
1423 +	return ret;
1424 +}
1425 +
1426 +static struct ctl_table cache_table[] =
1427 +{
1428 +        
1429 +	{
1430 +		.procname	= "C0_LA_set",
1431 +		.mode		= 0666,
1432 +		.proc_handler	= set_partition_handler,
1433 +		.data		= &set_partition[0],
1434 +		.maxlen		= sizeof(set_partition[0]),
1435 +		.extra1		= &set_partition_min,
1436 +		.extra2		= &set_partition_max,
1437 +	},	
1438 +	{
1439 +		.procname	= "C0_LB_set",
1440 +		.mode		= 0666,
1441 +		.proc_handler	= set_partition_handler,
1442 +		.data		= &set_partition[1],
1443 +		.maxlen		= sizeof(set_partition[1]),
1444 +		.extra1		= &set_partition_min,
1445 +		.extra2		= &set_partition_max,
1446 +	},	
1447 +	{
1448 +		.procname	= "C1_LA_set",
1449 +		.mode		= 0666,
1450 +		.proc_handler	= set_partition_handler,
1451 +		.data		= &set_partition[2],
1452 +		.maxlen		= sizeof(set_partition[2]),
1453 +		.extra1		= &set_partition_min,
1454 +		.extra2		= &set_partition_max,
1455 +	},
1456 +	{
1457 +		.procname	= "C1_LB_set",
1458 +		.mode		= 0666,
1459 +		.proc_handler	= set_partition_handler,
1460 +		.data		= &set_partition[3],
1461 +		.maxlen		= sizeof(set_partition[3]),
1462 +		.extra1		= &set_partition_min,
1463 +		.extra2		= &set_partition_max,
1464 +	},
1465 +	{
1466 +		.procname	= "C2_LA_set",
1467 +		.mode		= 0666,
1468 +		.proc_handler	= set_partition_handler,
1469 +		.data		= &set_partition[4],
1470 +		.maxlen		= sizeof(set_partition[4]),
1471 +		.extra1		= &set_partition_min,
1472 +		.extra2		= &set_partition_max,
1473 +	},
1474 +	{
1475 +		.procname	= "C2_LB_set",
1476 +		.mode		= 0666,
1477 +		.proc_handler	= set_partition_handler,
1478 +		.data		= &set_partition[5],
1479 +		.maxlen		= sizeof(set_partition[5]),
1480 +		.extra1		= &set_partition_min,
1481 +		.extra2		= &set_partition_max,
1482 +	},
1483 +	{
1484 +		.procname	= "C3_LA_set",
1485 +		.mode		= 0666,
1486 +		.proc_handler	= set_partition_handler,
1487 +		.data		= &set_partition[6],
1488 +		.maxlen		= sizeof(set_partition[6]),
1489 +		.extra1		= &set_partition_min,
1490 +		.extra2		= &set_partition_max,
1491 +	},
1492 +	{
1493 +		.procname	= "C3_LB_set",
1494 +		.mode		= 0666,
1495 +		.proc_handler	= set_partition_handler,
1496 +		.data		= &set_partition[7],
1497 +		.maxlen		= sizeof(set_partition[7]),
1498 +		.extra1		= &set_partition_min,
1499 +		.extra2		= &set_partition_max,
1500 +	},	
1501 +	{
1502 +		.procname	= "Call_LC_set",
1503 +		.mode		= 0666,
1504 +		.proc_handler	= set_partition_handler,
1505 +		.data		= &set_partition[8],
1506 +		.maxlen		= sizeof(set_partition[8]),
1507 +		.extra1		= &set_partition_min,
1508 +		.extra2		= &set_partition_max,
1509 +	},	
1510 +	{
1511 +		.procname	= "C0_LA_bank",
1512 +		.mode		= 0666,
1513 +		.proc_handler	= bank_partition_handler,
1514 +		.data		= &bank_partition[0],
1515 +		.maxlen		= sizeof(set_partition[0]),
1516 +		.extra1		= &bank_partition_min,
1517 +		.extra2		= &bank_partition_max,
1518 +	},
1519 +	{
1520 +		.procname	= "C0_LB_bank",
1521 +		.mode		= 0666,
1522 +		.proc_handler	= bank_partition_handler,
1523 +		.data		= &bank_partition[1],
1524 +		.maxlen		= sizeof(set_partition[1]),
1525 +		.extra1		= &bank_partition_min,
1526 +		.extra2		= &bank_partition_max,
1527 +	},		
1528 +	{
1529 +		.procname	= "C1_LA_bank",
1530 +		.mode		= 0666,
1531 +		.proc_handler	= bank_partition_handler,
1532 +		.data		= &bank_partition[2],
1533 +		.maxlen		= sizeof(set_partition[2]),
1534 +		.extra1		= &bank_partition_min,
1535 +		.extra2		= &bank_partition_max,
1536 +	},
1537 +	{
1538 +		.procname	= "C1_LB_bank",
1539 +		.mode		= 0666,
1540 +		.proc_handler	= bank_partition_handler,
1541 +		.data		= &bank_partition[3],
1542 +		.maxlen		= sizeof(set_partition[3]),
1543 +		.extra1		= &bank_partition_min,
1544 +		.extra2		= &bank_partition_max,
1545 +	},
1546 +	{
1547 +		.procname	= "C2_LA_bank",
1548 +		.mode		= 0666,
1549 +		.proc_handler	= bank_partition_handler,
1550 +		.data		= &bank_partition[4],
1551 +		.maxlen		= sizeof(set_partition[4]),
1552 +		.extra1		= &bank_partition_min,
1553 +		.extra2		= &bank_partition_max,
1554 +	},	
1555 +	{
1556 +		.procname	= "C2_LB_bank",
1557 +		.mode		= 0666,
1558 +		.proc_handler	= bank_partition_handler,
1559 +		.data		= &bank_partition[5],
1560 +		.maxlen		= sizeof(set_partition[5]),
1561 +		.extra1		= &bank_partition_min,
1562 +		.extra2		= &bank_partition_max,
1563 +	},		
1564 +	{
1565 +		.procname	= "C3_LA_bank",
1566 +		.mode		= 0666,
1567 +		.proc_handler	= bank_partition_handler,
1568 +		.data		= &bank_partition[6],
1569 +		.maxlen		= sizeof(set_partition[6]),
1570 +		.extra1		= &bank_partition_min,
1571 +		.extra2		= &bank_partition_max,
1572 +	},	
1573 +	{
1574 +		.procname	= "C3_LB_bank",
1575 +		.mode		= 0666,
1576 +		.proc_handler	= bank_partition_handler,
1577 +		.data		= &bank_partition[7],
1578 +		.maxlen		= sizeof(set_partition[7]),
1579 +		.extra1		= &bank_partition_min,
1580 +		.extra2		= &bank_partition_max,
1581 +	},	
1582 +	{
1583 +		.procname	= "Call_LC_bank",
1584 +		.mode		= 0666,
1585 +		.proc_handler	= bank_partition_handler,
1586 +		.data		= &bank_partition[8],
1587 +		.maxlen		= sizeof(set_partition[8]),
1588 +		.extra1		= &bank_partition_min,
1589 +		.extra2		= &bank_partition_max,
1590 +	},	
1591 +	{
1592 +		.procname	= "show_page_pool",
1593 +		.mode		= 0666,
1594 +		.proc_handler	= show_page_pool_handler,
1595 +		.data		= &show_page_pool,
1596 +		.maxlen		= sizeof(show_page_pool),
1597 +	},		{
1598 +		.procname	= "refill_page_pool",
1599 +		.mode		= 0666,
1600 +		.proc_handler	= refill_page_pool_handler,
1601 +		.data		= &refill_page_pool,
1602 +		.maxlen		= sizeof(refill_page_pool),
1603 +	},	
1604 +	{ }
1605 +};
1606 +
1607 +static struct ctl_table litmus_dir_table[] = {
1608 +	{
1609 +		.procname	= "litmus",
1610 + 		.mode		= 0555,
1611 +		.child		= cache_table,
1612 +	},
1613 +	{ }
1614 +};
1615 +
1616 +
1617 +static struct ctl_table_header *litmus_sysctls;
1618 +
1619 +
1620 +/*
1621 + * Initialize this proc
1622 + */
1623 +static int __init litmus_color_init(void)
1624 +{
1625 +	int err=0;
1626 +        printk("Init bankproc.c\n");
1627 +
1628 +	init_variables();
1629 +
1630 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
1631 +
1632 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
1633 +	if (!litmus_sysctls) {
1634 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
1635 +		err = -EFAULT;
1636 +		goto out;
1637 +	}
1638 +
1639 +	init_color_groups();			
1640 +	do_add_pages();
1641 +
1642 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
1643 +out:
1644 +	return err;
1645 +}
1646 +
1647 +module_init(litmus_color_init);
1648 +
1649 diff --git a/litmus/budget.c b/litmus/budget.c
1650 index 47bf78a..d67f4b3 100644
1651 --- a/litmus/budget.c
1652 +++ b/litmus/budget.c
1653 @@ -1,9 +1,11 @@
1654  #include <linux/sched.h>
1655  #include <linux/percpu.h>
1656  #include <linux/hrtimer.h>
1657 +#include <linux/uaccess.h>
1658  
1659  #include <litmus/litmus.h>
1660  #include <litmus/preempt.h>
1661 +#include <litmus/sched_plugin.h>
1662  
1663  #include <litmus/budget.h>
1664  
1665 @@ -113,4 +115,54 @@ static int __init init_budget_enforcement(void)
1666  	return 0;
1667  }
1668  
1669 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining)
1670 +{
1671 +	struct task_struct *t = current;
1672 +	unsigned long flags;
1673 +	s64 delta;
1674 +
1675 +	local_irq_save(flags);
1676 +
1677 +	delta = sched_clock_cpu(smp_processor_id()) - t->se.exec_start;
1678 +	if (delta < 0)
1679 +		delta = 0;
1680 +
1681 +	TRACE_CUR("current_budget: sc:%llu start:%llu lt_t:%llu delta:%lld exec-time:%llu rem:%llu\n",
1682 +		sched_clock_cpu(smp_processor_id()), t->se.exec_start,
1683 +		litmus_clock(), delta,
1684 +		tsk_rt(t)->job_params.exec_time,
1685 +		budget_remaining(t));
1686 +
1687 +	if (used_so_far)
1688 +		*used_so_far = tsk_rt(t)->job_params.exec_time + delta;
1689 +
1690 +	if (remaining) {
1691 +		*remaining = budget_remaining(t);
1692 +		if (*remaining > delta)
1693 +			*remaining -= delta;
1694 +		else
1695 +			*remaining = 0;
1696 +	}
1697 +
1698 +	local_irq_restore(flags);
1699 +}
1700 +
1701 +asmlinkage long sys_get_current_budget(
1702 +	lt_t __user * _expended,
1703 +	lt_t __user *_remaining)
1704 +{
1705 +	lt_t expended = 0, remaining = 0;
1706 +
1707 +	if (is_realtime(current))
1708 +		litmus->current_budget(&expended, &remaining);
1709 +
1710 +	if (_expended && put_user(expended, _expended))
1711 +		return -EFAULT;
1712 +
1713 +	if (_remaining && put_user(remaining, _remaining))
1714 +		return -EFAULT;
1715 +
1716 +	return 0;
1717 +}
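+
+/*
+ * Usage sketch (illustrative, not part of this patch): from user space the
+ * syscall can be wrapped in liblitmus style; the wrapper name
+ * get_current_budget() below is an assumption.
+ *
+ *	lt_t used, remaining;
+ *	if (get_current_budget(&used, &remaining) == 0)
+ *		printf("used: %llu ns, remaining: %llu ns\n", used, remaining);
+ *
+ * Either pointer argument may be NULL if only one value is of interest,
+ * since the kernel side only copies through non-NULL pointers.
+ */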
1718 +
1719  module_init(init_budget_enforcement);
1720 diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
1721 new file mode 100644
1722 index 0000000..15c1b01
1723 --- /dev/null
1724 +++ b/litmus/cache_proc.c
1725 @@ -0,0 +1,1336 @@
1726 +#include <asm/uaccess.h>
1727 +#include <linux/uaccess.h>
1728 +#include <linux/init.h>
1729 +#include <linux/types.h>
1730 +#include <linux/kernel.h>
1731 +#include <linux/module.h>
1732 +#include <linux/sysctl.h>
1733 +#include <linux/slab.h>
1734 +#include <linux/io.h>
1735 +#include <linux/mutex.h>
1736 +#include <linux/time.h>
1737 +#include <linux/random.h>
1738 +
1739 +#include <litmus/litmus_proc.h>
1740 +#include <litmus/sched_trace.h>
1741 +#include <litmus/cache_proc.h>
1742 +#include <litmus/mc2_common.h>
1743 +
1744 +#include <asm/hardware/cache-l2x0.h>
1745 +#include <asm/cacheflush.h>
1746 +
1747 +
1748 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
1749 +#define LOCK_ALL        (~UNLOCK_ALL)
1750 +#define MAX_NR_WAYS	16
1751 +#define MAX_NR_COLORS	16
1752 +#define CACHELINE_SIZE 32
1753 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
1754 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
1755 +
1756 +typedef struct cacheline
1757 +{
1758 +        int line[INTS_IN_CACHELINE];
1759 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
1760 +
1761 +void mem_lock(u32 lock_val, int cpu);
1762 +
1763 +/*
1764 + * unlocked_way[i] : allocation can occur in way i
1765 + *
1766 + * 0 = allocation can occur in the corresponding way
1767 + * 1 = allocation cannot occur in the corresponding way
1768 + */
1769 +u32 unlocked_way[MAX_NR_WAYS]  = {
1770 +	0xFFFFFFFE, /* way 0 unlocked */
1771 +	0xFFFFFFFD,
1772 +	0xFFFFFFFB,
1773 +	0xFFFFFFF7,
1774 +	0xFFFFFFEF, /* way 4 unlocked */
1775 +	0xFFFFFFDF,
1776 +	0xFFFFFFBF,
1777 +	0xFFFFFF7F,
1778 +	0xFFFFFEFF, /* way 8 unlocked */
1779 +	0xFFFFFDFF,
1780 +	0xFFFFFBFF,
1781 +	0xFFFFF7FF,
1782 +	0xFFFFEFFF, /* way 12 unlocked */
1783 +	0xFFFFDFFF,
1784 +	0xFFFFBFFF,
1785 +	0xFFFF7FFF,
1786 +};
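+
+/* Note: each entry above is simply ~(1u << i), i.e. only bit i is cleared,
+ * so way i is the single way open for allocation. */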
1787 +
1788 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
1789 +	0x0000FFFF, /* all ways are locked. usable = 0*/
1790 +	0x0000FFFE, /* way ~0 unlocked. usable = 1 */
1791 +	0x0000FFFC,
1792 +	0x0000FFF8,
1793 +	0x0000FFF0,
1794 +	0x0000FFE0,
1795 +	0x0000FFC0,
1796 +	0x0000FF80,
1797 +	0x0000FF00,
1798 +	0x0000FE00,
1799 +	0x0000FC00,
1800 +	0x0000F800,
1801 +	0x0000F000,
1802 +	0x0000E000,
1803 +	0x0000C000,
1804 +	0x00008000,
1805 +	0x00000000, /* way ~15 unlocked. usable = 16 */
1806 +};
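+
+/* Note: nr_unlocked_way[n] == (0xFFFF << n) & 0xFFFF, i.e. the lowest n
+ * ways are open for allocation and the remaining 16 - n ways stay locked. */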
1807 +
1808 +u32 way_partition[4] = {
1809 +	0xfffffff0, /* cpu0 */
1810 +	0xffffff0f, /* cpu1 */
1811 +	0xfffff0ff, /* cpu2 */
1812 +	0xffff0fff, /* cpu3 */
1813 +};
1814 +
1815 +u32 way_partitions[9] = {
1816 +	0xffff0003, /* cpu0 A */
1817 +	0xffff0003, /* cpu0 B */
1818 +	0xffff000C, /* cpu1 A */
1819 +	0xffff000C, /* cpu1 B */
1820 +	0xffff0030, /* cpu2 A */
1821 +	0xffff0030, /* cpu2 B */
1822 +	0xffff00C0, /* cpu3 A */
1823 +	0xffff00C0, /* cpu3 B */
1824 +	0xffffff00, /* lv C */
1825 +};
1826 +
1827 +u32 prev_lockdown_d_reg[5] = {
1828 +	0x0000FF00,
1829 +	0x0000FF00,
1830 +	0x0000FF00,
1831 +	0x0000FF00,
1832 +	0x000000FF, /* share with level-C */
1833 +};
1834 +
1835 +u32 prev_lockdown_i_reg[5] = {
1836 +	0x0000FF00,
1837 +	0x0000FF00,
1838 +	0x0000FF00,
1839 +	0x0000FF00,
1840 +	0x000000FF, /* share with level-C */
1841 +};
1842 +
1843 +u32 prev_lbm_i_reg[8] = {
1844 +	0x00000000,
1845 +	0x00000000,
1846 +	0x00000000,
1847 +	0x00000000,
1848 +	0x00000000,
1849 +	0x00000000,
1850 +	0x00000000,
1851 +	0x00000000,
1852 +};
1853 +
1854 +u32 prev_lbm_d_reg[8] = {
1855 +	0x00000000,
1856 +	0x00000000,
1857 +	0x00000000,
1858 +	0x00000000,
1859 +	0x00000000,
1860 +	0x00000000,
1861 +	0x00000000,
1862 +	0x00000000,
1863 +};
1864 +
1865 +static void __iomem *cache_base;
1866 +static void __iomem *lockreg_d;
1867 +static void __iomem *lockreg_i;
1868 +
1869 +static u32 cache_id;
1870 +
1871 +struct mutex actlr_mutex;
1872 +struct mutex l2x0_prefetch_mutex;
1873 +struct mutex lockdown_proc;
1874 +static u32 way_partition_min;
1875 +static u32 way_partition_max;
1876 +
1877 +static int zero = 0;
1878 +static int one = 1;
1879 +
1880 +static int l1_prefetch_proc;
1881 +static int l2_prefetch_hint_proc;
1882 +static int l2_double_linefill_proc;
1883 +static int l2_data_prefetch_proc;
1884 +static int os_isolation;
1885 +static int use_part;
1886 +
1887 +u32 lockdown_reg[9] = {
1888 +	0x00000000,
1889 +	0x00000000,
1890 +	0x00000000,
1891 +	0x00000000,
1892 +	0x00000000,
1893 +	0x00000000,
1894 +	0x00000000,
1895 +	0x00000000,
1896 +};
1897 +	
1898 +
1899 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
1900 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
1901 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1902 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
1903 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
1904 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
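+
+/* Example (for illustration): ld_d_reg(1) resolves to
+ * cache_base + L2X0_LOCKDOWN_WAY_D_BASE + 1 * L2X0_LOCKDOWN_STRIDE,
+ * i.e. the data-side lockdown register of CPU 1. */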
1905 +
1906 +int lock_all;
1907 +int nr_lockregs;
1908 +static raw_spinlock_t cache_lock;
1909 +static raw_spinlock_t prefetch_lock;
1910 +static void ***flusher_pages = NULL;
1911 +
1912 +extern void l2c310_flush_all(void);
1913 +
1914 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
1915 +{
1916 +	/* wait for cache operation by line or way to complete */
1917 +	while (readl_relaxed(reg) & mask)
1918 +		cpu_relax();
1919 +}
1920 +
1921 +#ifdef CONFIG_CACHE_L2X0 
1922 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
1923 +{
1924 +	/* cache operations by line are atomic on PL310 */
1925 +}
1926 +#else
1927 +#define cache_wait	cache_wait_way
1928 +#endif
1929 +
1930 +static inline void cache_sync(void)
1931 +{
1932 +	void __iomem *base = cache_base;
1933 +
1934 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
1935 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
1936 +}
1937 +
1938 +static void print_lockdown_registers(int cpu)
1939 +{
1940 +	int i;
1941 +	//for (i = 0; i < nr_lockregs; i++) {
1942 +	for (i = 0; i < 4; i++) {
1943 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
1944 +				i, readl_relaxed(ld_d_reg(i)));
1945 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
1946 +				i, readl_relaxed(ld_i_reg(i)));
1947 +	}
1948 +}
1949 +
1950 +static void test_lockdown(void *ignore)
1951 +{
1952 +	int i, cpu;
1953 +
1954 +	cpu = smp_processor_id();
1955 +	printk("Start lockdown test on CPU %d.\n", cpu);
1956 +
1957 +	for (i = 0; i < nr_lockregs; i++) {
1958 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
1959 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
1960 +	}
1961 +
1962 +	printk("Lockdown initial state:\n");
1963 +	print_lockdown_registers(cpu);
1964 +	printk("---\n");
1965 +
1966 +	for (i = 0; i < nr_lockregs; i++) {
1967 +		writel_relaxed(1, ld_d_reg(i));
1968 +		writel_relaxed(2, ld_i_reg(i));
1969 +	}
1970 +	printk("Lockdown all data=1 instr=2:\n");
1971 +	print_lockdown_registers(cpu);
1972 +	printk("---\n");
1973 +
1974 +	for (i = 0; i < nr_lockregs; i++) {
1975 +		writel_relaxed((1 << i), ld_d_reg(i));
1976 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
1977 +	}
1978 +	printk("Lockdown varies:\n");
1979 +	print_lockdown_registers(cpu);
1980 +	printk("---\n");
1981 +
1982 +	for (i = 0; i < nr_lockregs; i++) {
1983 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
1984 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
1985 +	}
1986 +	printk("Lockdown all zero:\n");
1987 +	print_lockdown_registers(cpu);
1988 +
1989 +	printk("End lockdown test.\n");
1990 +}
1991 +
1992 +void litmus_setup_lockdown(void __iomem *base, u32 id)
1993 +{
1994 +	cache_base = base;
1995 +	cache_id = id;
1996 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
1997 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
1998 +    
1999 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
2000 +		nr_lockregs = 8;
2001 +	} else {
2002 +		printk("Unknown cache ID!\n");
2003 +		nr_lockregs = 1;
2004 +	}
2005 +	
2006 +	mutex_init(&actlr_mutex);
2007 +	mutex_init(&l2x0_prefetch_mutex);
2008 +	mutex_init(&lockdown_proc);
2009 +	raw_spin_lock_init(&cache_lock);
2010 +	raw_spin_lock_init(&prefetch_lock);
2011 +	
2012 +	test_lockdown(NULL);
2013 +}
2014 +
2015 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2016 +		size_t *lenp, loff_t *ppos)
2017 +{
2018 +	int ret = 0, i;
2019 +	unsigned long flags;
2020 +	
2021 +	mutex_lock(&lockdown_proc);
2022 +	
2023 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2024 +	if (ret)
2025 +		goto out;
2026 +	
2027 +	if (write) {
2028 +		printk("Way-partition settings:\n");
2029 +		for (i = 0; i < 9; i++) {
2030 +			printk("0x%08X\n", way_partitions[i]);
2031 +		}
2032 +		for (i = 0; i < 4; i++) {
2033 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2034 +				       i * L2X0_LOCKDOWN_STRIDE);
2035 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2036 +				       i * L2X0_LOCKDOWN_STRIDE);
2037 +		}
2038 +	}
2039 +	
2040 +	local_irq_save(flags);
2041 +	print_lockdown_registers(smp_processor_id());
2042 +	l2c310_flush_all();
2043 +	local_irq_restore(flags);
2044 +out:
2045 +	mutex_unlock(&lockdown_proc);
2046 +	return ret;
2047 +}
2048 +
2049 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
2050 +		size_t *lenp, loff_t *ppos)
2051 +{
2052 +	int ret = 0, i;
2053 +	unsigned long flags;
2054 +	
2055 +	mutex_lock(&lockdown_proc);
2056 +	
2057 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2058 +	if (ret)
2059 +		goto out;
2060 +	
2061 +	if (write && lock_all == 1) {
2062 +		for (i = 0; i < nr_lockregs; i++) {
2063 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2064 +				       i * L2X0_LOCKDOWN_STRIDE);
2065 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2066 +				       i * L2X0_LOCKDOWN_STRIDE);
2067 +		}
2068 +/*		
2069 +		for (i = 0; i < nr_lockregs;  i++) {
2070 +			barrier();
2071 +			mem_lock(LOCK_ALL, i);
2072 +			barrier();
2073 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
2074 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
2075 +		}
2076 +*/		
2077 +	}
2078 +	if (write && lock_all == 0) {
2079 +		for (i = 0; i < nr_lockregs; i++) {
2080 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2081 +				       i * L2X0_LOCKDOWN_STRIDE);
2082 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2083 +				       i * L2X0_LOCKDOWN_STRIDE);
2084 +		}
2085 +
2086 +	}
2087 +
2088 +	local_irq_save(flags);
2089 +	print_lockdown_registers(smp_processor_id());
2090 +	l2c310_flush_all();
2091 +	local_irq_restore(flags);
2092 +out:
2093 +	mutex_unlock(&lockdown_proc);
2094 +	return ret;
2095 +}
2096 +
2097 +void cache_lockdown(u32 lock_val, int cpu)
2098 +{
2099 +	__asm__ __volatile__ (
2100 +"	str	%[lockval], [%[dcachereg]]\n"
2101 +"	str	%[lockval], [%[icachereg]]\n"
2102 +	: 
2103 +	: [dcachereg] "r" (ld_d_reg(cpu)),
2104 +	  [icachereg] "r" (ld_i_reg(cpu)),
2105 +	  [lockval] "r" (lock_val)
2106 +	: "cc");
2107 +}
2108 +
2109 +void do_partition(enum crit_level lv, int cpu)
2110 +{
2111 +	u32 regs;
2112 +	unsigned long flags;
2113 +	
2114 +	if (lock_all || !use_part)
2115 +		return;
2116 +	raw_spin_lock_irqsave(&cache_lock, flags);
2117 +	switch(lv) {
2118 +		case CRIT_LEVEL_A:
2119 +			regs = ~way_partitions[cpu*2];
2120 +			regs &= 0x0000ffff;
2121 +			break;
2122 +		case CRIT_LEVEL_B:
2123 +			regs = ~way_partitions[cpu*2+1];
2124 +			regs &= 0x0000ffff;
2125 +			break;
2126 +		case CRIT_LEVEL_C:
2127 +		case NUM_CRIT_LEVELS:
2128 +			regs = ~way_partitions[8];
2129 +			regs &= 0x0000ffff;
2130 +			break;
2131 +		default:
2132 +			BUG();
2133 +
2134 +	}
2135 +	barrier();
2136 +
2137 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2138 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2139 +	barrier();
2140 +
2141 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2142 +}
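+
+/*
+ * Worked example (follows directly from the tables above): for cpu = 1 and
+ * lv = CRIT_LEVEL_A, way_partitions[2] == 0xffff000C, so
+ * regs = ~0xffff000C & 0x0000ffff = 0x0000fff3.  Bits 2 and 3 are clear,
+ * i.e. only ways 2 and 3 accept allocations from CPU 1 at levels A/B.
+ */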
2143 +
2144 +void lock_cache(int cpu, u32 val)
2145 +{
2146 +	unsigned long flags;
2147 +	
2148 +	local_irq_save(flags);
2149 +	if (val != 0xffffffff) {
2150 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2151 +					   cpu * L2X0_LOCKDOWN_STRIDE);
2152 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2153 +					   cpu * L2X0_LOCKDOWN_STRIDE);
2154 +	}
2155 +	else {
2156 +		int i;
2157 +		for (i = 0; i < 4; i++)
2158 +			do_partition(CRIT_LEVEL_A, i);
2159 +	}
2160 +	local_irq_restore(flags);
2161 +}
2162 +
2163 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2164 +		size_t *lenp, loff_t *ppos)
2165 +{
2166 +	int ret = 0;
2167 +	
2168 +	mutex_lock(&lockdown_proc);
2169 +
2170 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2171 +	if (ret)
2172 +		goto out;
2173 +	
2174 +
2175 +	printk("USE_PART HANDLER = %d\n", use_part);
2176 +
2177 +out:
2178 +	mutex_unlock(&lockdown_proc);
2179 +	return ret;
2180 +}
2181 +
2182 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2183 +		size_t *lenp, loff_t *ppos)
2184 +{
2185 +	int ret = 0;
2186 +	
2187 +	mutex_lock(&lockdown_proc);
2188 +	
2189 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2190 +	if (ret)
2191 +		goto out;
2192 +	
2193 +
2194 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
2195 +
2196 +out:
2197 +	mutex_unlock(&lockdown_proc);
2198 +	return ret;
2199 +}
2200 +
2201 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
2202 +		size_t *lenp, loff_t *ppos)
2203 +{
2204 +	int ret = 0, i;
2205 +	
2206 +	mutex_lock(&lockdown_proc);
2207 +	
2208 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2209 +	if (ret)
2210 +		goto out;
2211 +	
2212 +	if (write) {
2213 +		for (i = 0; i < nr_lockregs; i++) {
2214 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2215 +				       i * L2X0_LOCKDOWN_STRIDE);
2216 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2217 +				       i * L2X0_LOCKDOWN_STRIDE);
2218 +		}
2219 +	}
2220 +
2221 +out:
2222 +	mutex_unlock(&lockdown_proc);
2223 +	return ret;
2224 +}
2225 +
2226 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
2227 +		size_t *lenp, loff_t *ppos)
2228 +{
2229 +	int ret = 0, i;
2230 +	
2231 +	mutex_lock(&lockdown_proc);
2232 +	
2233 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2234 +	if (ret)
2235 +		goto out;
2236 +	
2237 +	if (write) {
2238 +		for (i = 0; i < nr_lockregs; i++) {
2239 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2240 +				       i * L2X0_LOCKDOWN_STRIDE);
2241 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2242 +				       i * L2X0_LOCKDOWN_STRIDE);
2243 +		}
2244 +	}
2245 +
2246 +out:
2247 +	mutex_unlock(&lockdown_proc);
2248 +	return ret;
2249 +}
2250 +
2251 +inline void enter_irq_mode(void)
2252 +{
2253 +	int cpu = smp_processor_id();
2254 +
2255 +	if (os_isolation == 0)
2256 +		return;	
2257 +	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2258 +	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2259 +	
2260 +	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
2261 +	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
2262 +}
2263 +
2264 +inline void exit_irq_mode(void)
2265 +{
2266 +	int cpu = smp_processor_id();
2267 +
2268 +	if (os_isolation == 0)
2269 +		return;
2270 +	writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
2271 +	writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));	
2272 +}
2273 +
2274 +/* Operate on the Cortex-A9's ACTLR register */
2275 +#define ACTLR_L2_PREFETCH_HINT	(1 << 1)
2276 +#define ACTLR_L1_PREFETCH	(1 << 2)
2277 +
2278 +/*
2279 + * Change the ACTLR.
2280 + * @mode	- If 1 (0), set (clear) the bit given in @mask in the ACTLR.
2281 + * @mask	- A mask in which one bit is set to operate on the ACTLR.
2282 + */
2283 +static void actlr_change(int mode, int mask)
2284 +{
2285 +	u32 orig_value, new_value, reread_value;
2286 +
2287 +	if (0 != mode && 1 != mode) {
2288 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2289 +				__FUNCTION__);
2290 +		return;
2291 +	}
2292 +
2293 +	/* get the original value */
2294 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
2295 +
2296 +	if (0 == mode)
2297 +		new_value = orig_value & ~(mask);
2298 +	else
2299 +		new_value = orig_value | mask;
2300 +
2301 +	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
2302 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
2303 +
2304 +	printk("ACTLR: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2305 +			orig_value, new_value, reread_value);
2306 +}
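+
+/* Usage sketch: the proc handlers below invoke this as, e.g.,
+ * actlr_change(1, ACTLR_L1_PREFETCH) to set the L1 prefetch bit or
+ * actlr_change(0, ACTLR_L1_PREFETCH) to clear it again. */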
2307 +
2308 +int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
2309 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2310 +{
2311 +	int ret, mode;
2312 +
2313 +	mutex_lock(&actlr_mutex);
2314 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2315 +
2316 +	if (!ret && write) {
2317 +		mode = *((int*)table->data);
2318 +		actlr_change(mode, ACTLR_L1_PREFETCH);
2319 +	}
2320 +	mutex_unlock(&actlr_mutex);
2321 +
2322 +	return ret;
2323 +}
2324 +
2325 +int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
2326 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2327 +{
2328 +	int ret, mode;
2329 +
2330 +	mutex_lock(&actlr_mutex);
2331 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2332 +	if (!ret && write) {
2333 +		mode = *((int*)table->data);
2334 +		actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
2335 +	}
2336 +	mutex_unlock(&actlr_mutex);
2337 +
2338 +	return ret;
2339 +}
2340 +
2341 +
2342 +/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
2343 +#define L2X0_PREFETCH_DOUBLE_LINEFILL	(1 << 30)
2344 +#define L2X0_PREFETCH_INST_PREFETCH	(1 << 29)
2345 +#define L2X0_PREFETCH_DATA_PREFETCH	(1 << 28)
2346 +static void l2x0_prefetch_change(int mode, int mask)
2347 +{
2348 +	u32 orig_value, new_value, reread_value;
2349 +
2350 +	if (0 != mode && 1 != mode) {
2351 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2352 +				__FUNCTION__);
2353 +		return;
2354 +	}
2355 +
2356 +	orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2357 +
2358 +	if (0 == mode)
2359 +		new_value = orig_value & ~(mask);
2360 +	else
2361 +		new_value = orig_value | mask;
2362 +
2363 +	writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
2364 +	reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2365 +
2366 +	printk("l2x0 prefetch: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2367 +			orig_value, new_value, reread_value);
2368 +}
2369 +
2370 +int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
2371 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2372 +{
2373 +	int ret, mode;
2374 +
2375 +	mutex_lock(&l2x0_prefetch_mutex);
2376 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2377 +	if (!ret && write) {
2378 +		mode = *((int*)table->data);
2379 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
2380 +	}
2381 +	mutex_unlock(&l2x0_prefetch_mutex);
2382 +
2383 +	return ret;
2384 +}
2385 +
2386 +int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
2387 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2388 +{
2389 +	int ret, mode;
2390 +
2391 +	mutex_lock(&l2x0_prefetch_mutex);
2392 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2393 +	if (!ret && write) {
2394 +		mode = *((int*)table->data);
2395 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
2396 +	}
2397 +	mutex_unlock(&l2x0_prefetch_mutex);
2398 +
2399 +	return ret;
2400 +}
2401 +
2402 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2403 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2404 +
2405 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2406 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2407 +		
2408 +static struct ctl_table cache_table[] =
2409 +{
2410 +	{
2411 +		.procname	= "C0_LA_way",
2412 +		.mode		= 0666,
2413 +		.proc_handler	= way_partition_handler,
2414 +		.data		= &way_partitions[0],
2415 +		.maxlen		= sizeof(way_partitions[0]),
2416 +		.extra1		= &way_partition_min,
2417 +		.extra2		= &way_partition_max,
2418 +	},	
2419 +	{
2420 +		.procname	= "C0_LB_way",
2421 +		.mode		= 0666,
2422 +		.proc_handler	= way_partition_handler,
2423 +		.data		= &way_partitions[1],
2424 +		.maxlen		= sizeof(way_partitions[1]),
2425 +		.extra1		= &way_partition_min,
2426 +		.extra2		= &way_partition_max,
2427 +	},	
2428 +	{
2429 +		.procname	= "C1_LA_way",
2430 +		.mode		= 0666,
2431 +		.proc_handler	= way_partition_handler,
2432 +		.data		= &way_partitions[2],
2433 +		.maxlen		= sizeof(way_partitions[2]),
2434 +		.extra1		= &way_partition_min,
2435 +		.extra2		= &way_partition_max,
2436 +	},
2437 +	{
2438 +		.procname	= "C1_LB_way",
2439 +		.mode		= 0666,
2440 +		.proc_handler	= way_partition_handler,
2441 +		.data		= &way_partitions[3],
2442 +		.maxlen		= sizeof(way_partitions[3]),
2443 +		.extra1		= &way_partition_min,
2444 +		.extra2		= &way_partition_max,
2445 +	},
2446 +	{
2447 +		.procname	= "C2_LA_way",
2448 +		.mode		= 0666,
2449 +		.proc_handler	= way_partition_handler,
2450 +		.data		= &way_partitions[4],
2451 +		.maxlen		= sizeof(way_partitions[4]),
2452 +		.extra1		= &way_partition_min,
2453 +		.extra2		= &way_partition_max,
2454 +	},
2455 +	{
2456 +		.procname	= "C2_LB_way",
2457 +		.mode		= 0666,
2458 +		.proc_handler	= way_partition_handler,
2459 +		.data		= &way_partitions[5],
2460 +		.maxlen		= sizeof(way_partitions[5]),
2461 +		.extra1		= &way_partition_min,
2462 +		.extra2		= &way_partition_max,
2463 +	},
2464 +	{
2465 +		.procname	= "C3_LA_way",
2466 +		.mode		= 0666,
2467 +		.proc_handler	= way_partition_handler,
2468 +		.data		= &way_partitions[6],
2469 +		.maxlen		= sizeof(way_partitions[6]),
2470 +		.extra1		= &way_partition_min,
2471 +		.extra2		= &way_partition_max,
2472 +	},
2473 +	{
2474 +		.procname	= "C3_LB_way",
2475 +		.mode		= 0666,
2476 +		.proc_handler	= way_partition_handler,
2477 +		.data		= &way_partitions[7],
2478 +		.maxlen		= sizeof(way_partitions[7]),
2479 +		.extra1		= &way_partition_min,
2480 +		.extra2		= &way_partition_max,
2481 +	},	
2482 +	{
2483 +		.procname	= "Call_LC_way",
2484 +		.mode		= 0666,
2485 +		.proc_handler	= way_partition_handler,
2486 +		.data		= &way_partitions[8],
2487 +		.maxlen		= sizeof(way_partitions[8]),
2488 +		.extra1		= &way_partition_min,
2489 +		.extra2		= &way_partition_max,
2490 +	},		
2491 +	{
2492 +		.procname	= "lock_all",
2493 +		.mode		= 0666,
2494 +		.proc_handler	= lock_all_handler,
2495 +		.data		= &lock_all,
2496 +		.maxlen		= sizeof(lock_all),
2497 +		.extra1		= &zero,
2498 +		.extra2		= &one,
2499 +	},
2500 +	{
2501 +		.procname	= "l1_prefetch",
2502 +		.mode		= 0644,
2503 +		.proc_handler	= litmus_l1_prefetch_proc_handler,
2504 +		.data		= &l1_prefetch_proc,
2505 +		.maxlen		= sizeof(l1_prefetch_proc),
2506 +	},
2507 +	{
2508 +		.procname	= "l2_prefetch_hint",
2509 +		.mode		= 0644,
2510 +		.proc_handler	= litmus_l2_prefetch_hint_proc_handler,
2511 +		.data		= &l2_prefetch_hint_proc,
2512 +		.maxlen		= sizeof(l2_prefetch_hint_proc),
2513 +	},
2514 +	{
2515 +		.procname	= "l2_double_linefill",
2516 +		.mode		= 0644,
2517 +		.proc_handler	= litmus_l2_double_linefill_proc_handler,
2518 +		.data		= &l2_double_linefill_proc,
2519 +		.maxlen		= sizeof(l2_double_linefill_proc),
2520 +	},
2521 +	{
2522 +		.procname	= "l2_data_prefetch",
2523 +		.mode		= 0644,
2524 +		.proc_handler	= litmus_l2_data_prefetch_proc_handler,
2525 +		.data		= &l2_data_prefetch_proc,
2526 +		.maxlen		= sizeof(l2_data_prefetch_proc),
2527 +	},
2528 +	{
2529 +		.procname	= "os_isolation",
2530 +		.mode		= 0644,
2531 +		.proc_handler	= os_isolation_proc_handler,
2532 +		.data		= &os_isolation,
2533 +		.maxlen		= sizeof(os_isolation),
2534 +	},
2535 +	{
2536 +		.procname	= "use_part",
2537 +		.mode		= 0644,
2538 +		.proc_handler	= use_part_proc_handler,
2539 +		.data		= &use_part,
2540 +		.maxlen		= sizeof(use_part),
2541 +	},
2542 +	{
2543 +		.procname	= "do_perf_test",
2544 +		.mode		= 0644,
2545 +		.proc_handler	= do_perf_test_proc_handler,
2546 +	},
2547 +	{
2548 +		.procname	= "setup_flusher",
2549 +		.mode		= 0644,
2550 +		.proc_handler	= setup_flusher_proc_handler,
2551 +	},
2552 +	{
2553 +		.procname	= "lockdown_reg_0",
2554 +		.mode		= 0644,
2555 +		.proc_handler	= lockdown_reg_handler,
2556 +		.data		= &lockdown_reg[0],
2557 +		.maxlen		= sizeof(lockdown_reg[0]),
2558 +		.extra1		= &way_partition_min,
2559 +		.extra2		= &way_partition_max,
2560 +	},
2561 +	{
2562 +		.procname	= "lockdown_reg_1",
2563 +		.mode		= 0644,
2564 +		.proc_handler	= lockdown_reg_handler,
2565 +		.data		= &lockdown_reg[1],
2566 +		.maxlen		= sizeof(lockdown_reg[1]),
2567 +		.extra1		= &way_partition_min,
2568 +		.extra2		= &way_partition_max,
2569 +	},
2570 +	{
2571 +		.procname	= "lockdown_reg_2",
2572 +		.mode		= 0644,
2573 +		.proc_handler	= lockdown_reg_handler,
2574 +		.data		= &lockdown_reg[2],
2575 +		.maxlen		= sizeof(lockdown_reg[2]),
2576 +		.extra1		= &way_partition_min,
2577 +		.extra2		= &way_partition_max,
2578 +	},
2579 +	{
2580 +		.procname	= "lockdown_reg_3",
2581 +		.mode		= 0644,
2582 +		.proc_handler	= lockdown_reg_handler,
2583 +		.data		= &lockdown_reg[3],
2584 +		.maxlen		= sizeof(lockdown_reg[3]),
2585 +		.extra1		= &way_partition_min,
2586 +		.extra2		= &way_partition_max,
2587 +	},
2588 +	{
2589 +		.procname	= "lockdown_regs",
2590 +		.mode		= 0644,
2591 +		.proc_handler	= lockdown_global_handler,
2592 +		.data		= &lockdown_reg[8],
2593 +		.maxlen		= sizeof(lockdown_reg[8]),
2594 +		.extra1		= &way_partition_min,
2595 +		.extra2		= &way_partition_max,
2596 +	},
2597 +	{ }
2598 +};
2599 +
2600 +static struct ctl_table litmus_dir_table[] = {
2601 +	{
2602 +		.procname	= "litmus",
2603 + 		.mode		= 0555,
2604 +		.child		= cache_table,
2605 +	},
2606 +	{ }
2607 +};
2608 +
2609 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
2610 +{
2611 +	u32 v = 0;
2612 +
2613 +	__asm__ __volatile__ (
2614 +"	.align 5\n"
2615 +"	str	%[lockval], [%[cachereg]]\n"
2616 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2617 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2618 +"	bgt	1b\n				@ read more, if necessary\n"
2619 +	: [addr] "+r" (start),
2620 +	  [val] "+r" (v)
2621 +	: [end] "r" (end),
2622 +#ifdef CONFIG_CACHE_L2X0
2623 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2624 +#else
2625 +	  [cachereg] "r" (lockreg_d),
2626 +#endif
2627 +	  [lockval] "r" (lock_val)
2628 +	: "cc");
2629 +
2630 +	return v;
2631 +}
2632 +
2633 +
2634 +/*
2635 + * Prefetch a memory range by reading the first word of each cache line,
2636 + * locking the lines into the ways selected by @lock_val.
2637 + *
2638 + * @lock_val: lockdown value written to the per-CPU lockdown register first
2639 + * @unlock_val: lockdown value restored after the range has been read
2640 + * @start: start address to be prefetched
2641 + * @end: end address to prefetch (exclusive)
2642 + *
2643 + * Assumes: start < end
2644 + */
2645 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
2646 +{
2647 +#ifndef CONFIG_CACHE_L2X0
2648 +	unsigned long flags;
2649 +#endif
2650 +	u32 v = 0;
2651 +
2652 +#ifndef CONFIG_CACHE_L2X0
2653 +	raw_spin_lock_irqsave(&prefetch_lock, flags);
2654 +#endif
2655 +
2656 +	__asm__ __volatile__ (
2657 +"	.align 5\n"
2658 +"	str	%[lockval], [%[cachereg]]\n"
2659 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2660 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2661 +"	bgt	1b\n				@ read more, if necessary\n"
2662 +"	str	%[unlockval], [%[cachereg]]\n"
2663 +	: [addr] "+r" (start),
2664 +	  [val] "+r" (v)
2665 +	: [end] "r" (end),
2666 +#ifdef CONFIG_CACHE_L2X0
2667 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2668 +#else
2669 +	  [cachereg] "r" (lockreg_d),
2670 +#endif
2671 +	  [lockval] "r" (lock_val),
2672 +	  [unlockval] "r" (unlock_val)
2673 +	: "cc");
2674 +
2675 +#ifndef CONFIG_CACHE_L2X0
2676 +	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
2677 +#endif
2678 +
2679 +	return v;
2680 +}
2681 +
2682 +static long update_timeval(struct timespec lhs, struct timespec rhs)
2683 +{
2684 +	long val;
2685 +	struct timespec ts;
2686 +
2687 +	ts = timespec_sub(rhs, lhs);
2688 +	val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
2689 +
2690 +	return val;
2691 +}
2692 +
2693 +extern void v7_flush_kern_dcache_area(void *, size_t);
2694 +extern void v7_flush_kern_cache_all(void);
2695 +/*
2696 + * Ensure that this page is not in the L1 or L2 cache.
2697 + * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
2698 + * kernel or user vaddr.
2699 + */
2700 +void color_flush_page(void *vaddr, size_t size)
2701 +{
2702 +	v7_flush_kern_dcache_area(vaddr, size);
2703 +	//v7_flush_kern_cache_all();
2704 +}
2705 +
2706 +extern struct page* get_colored_page(unsigned long color);
2707 +
2708 +int setup_flusher_array(void)
2709 +{
2710 +	int color, way, ret = 0;
2711 +	struct page *page;
2712 +
2713 +	if (flusher_pages != NULL)
2714 +		goto out;
2715 +
2716 +	flusher_pages = (void***) kmalloc(MAX_NR_WAYS
2717 +			* sizeof(*flusher_pages), GFP_KERNEL);
2718 +	if (!flusher_pages) {
2719 +		printk(KERN_WARNING "No memory for flusher array!\n");
2720 +		ret = -EINVAL;
2721 +		goto out;
2722 +	}
2723 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2724 +		void **flusher_color_arr;
2725 +		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
2726 +				* MAX_NR_COLORS, GFP_KERNEL);
2727 +		if (!flusher_color_arr) {
2728 +			printk(KERN_WARNING "No memory for flusher array!\n");
2729 +			ret = -ENOMEM;
2730 +			goto out_free;
2731 +		}
2732 +
2733 +		flusher_pages[way] = flusher_color_arr;
2734 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2735 +			int node;
2736 +			node = color + 112; /* populate from bank 7: 7 * MAX_NR_COLORS + color */
2737 +			page = get_colored_page(node);
2738 +			if (!page) {
2739 +				printk(KERN_WARNING "no more colored pages\n");
2740 +				ret = -EINVAL;
2741 +				goto out_free;
2742 +			}
2743 +			flusher_pages[way][color] = page_address(page);
2744 +			if (!flusher_pages[way][color]) {
2745 +				printk(KERN_WARNING "bad page address\n");
2746 +				ret = -EINVAL;
2747 +				goto out_free;
2748 +			}
2749 +		}
2750 +	}
2751 +
2752 +out:
2753 +	return ret;
2754 +out_free:
2755 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2756 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2757 +			/* not bothering to try and give back colored pages */
2758 +		}
2759 +		kfree(flusher_pages[way]);
2760 +	}
2761 +	kfree(flusher_pages);
2762 +	flusher_pages = NULL;
2763 +	return ret;
2764 +}
2765 +
2766 +void flush_cache(int all)
2767 +{
2768 +	int way, color, cpu;
2769 +	unsigned long flags;
2770 +	
2771 +	raw_spin_lock_irqsave(&cache_lock, flags);
2772 +	cpu = raw_smp_processor_id();
2773 +	
2774 +	prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2775 +	prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2776 +	for (way=0;way<MAX_NR_WAYS;way++) {
2777 +		if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
2778 +			!all)
2779 +			continue;
2780 +		for (color=0;color<MAX_NR_COLORS;color++) {
2781 +			void *vaddr = flusher_pages[way][color];
2782 +			u32 lvalue  = unlocked_way[way];
2783 +			color_read_in_mem_lock(lvalue, LOCK_ALL,
2784 +					       vaddr, vaddr + PAGE_SIZE);
2785 +		}
2786 +
2787 +	}
2788 +
2789 +	writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
2790 +	writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
2791 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2792 +}
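+
+/*
+ * Note on the technique: flush_cache() evicts cached data by displacement
+ * rather than by cache maintenance operations.  For every (way, color) pair
+ * it reads one pre-allocated flusher page with only that way unlocked, which
+ * forces the way's previous contents out, then restores the CPU's original
+ * lockdown masks.
+ */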
2793 +
2794 +/* src = shared, dst = local */
2795 +#if 1 // random
2796 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2797 +{
2798 +	/* size is in KB */
2799 +	long ret = 0;
2800 +	lt_t t1, t2;
2801 +	int numlines = size * CACHELINES_IN_1KB;
2802 +	int next, sum = 0, ran;
2803 +	unsigned long flags;
2804 +	
2805 +	get_random_bytes(&ran, sizeof(int));
2806 +	next = ran % ((size*1024)/sizeof(cacheline_t));
2807 +	
2808 +	//preempt_disable();
2809 +	if (type == 1) {
2810 +		int i, j;
2811 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2812 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2813 +		
2814 +		local_irq_save(flags);
2815 +		t1 = litmus_clock();
2816 +		for (i = 0; i < numlines; i++) {
2817 +			next = src[next].line[0];
2818 +			for (j = 1; j < INTS_IN_CACHELINE; j++) {
2819 +				//dst[next].line[j] = src[next].line[j]; // read
2820 +				src[next].line[j] = dst[next].line[j]; // write
2821 +			}			
2822 +		}
2823 +		t2 = litmus_clock();
2824 +		local_irq_restore(flags);
2825 +		sum = next + (int)t2;
2826 +		t2 -= t1;
2827 +		ret = put_user(t2, ts);
2828 +	}
2829 +	else {
2830 +		int i, j;
2831 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2832 +		local_irq_save(flags);
2833 +		t1 = litmus_clock();
2834 +		for (i = 0; i < numlines; i++) {
2835 +			next = src[next].line[0];
2836 +			for (j = 1; j < INTS_IN_CACHELINE; j++) {
2837 +				//dst[next].line[j] = src[next].line[j]; //read
2838 +				src[next].line[j] = dst[next].line[j]; //write
2839 +			}			
2840 +		}
2841 +		t2 = litmus_clock();
2842 +		local_irq_restore(flags);
2843 +		sum = next + (int)t2;
2844 +		t2 -= t1;
2845 +		ret = put_user(t2, ts);
2846 +		v7_flush_kern_dcache_area(src, size*1024);
2847 +	}
2848 +	//preempt_enable();
2849 +	flush_cache(1);
2850 +
2851 +	return ret;
2852 +}
2853 +#else
2854 +// sequential
2855 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2856 +{
2857 +	/* size is in KB */
2858 +	long ret = 0;
2859 +	lt_t t1, t2;
2860 +	int numlines = size * CACHELINES_IN_1KB;
2861 +	int sum = 0;
2862 +	unsigned long flags;
2863 +	
2864 +	//preempt_disable();
2865 +	if (type == 1) {
2866 +		int i, j;
2867 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2868 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2869 +		
2870 +		local_irq_save(flags);
2871 +		t1 = litmus_clock();
2872 +		for (i = 0; i < numlines; i++) {
2873 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2874 +				//dst[i].line[j] = src[i].line[j]; // read
2875 +				src[i].line[j] = dst[i].line[j]; // write
2876 +			}			
2877 +		}
2878 +		t2 = litmus_clock();
2879 +		local_irq_restore(flags);
2880 +		sum = (int)(t1 + t2);
2881 +		t2 -= t1;
2882 +		ret = put_user(t2, ts);
2883 +	}
2884 +	else {
2885 +		int i, j;
2886 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2887 +		local_irq_save(flags);
2888 +		t1 = litmus_clock();
2889 +		for (i = 0; i < numlines; i++) {
2890 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2891 +				//dst[i].line[j] = src[i].line[j]; //read
2892 +				src[i].line[j] = dst[i].line[j]; //write
2893 +			}			
2894 +		}
2895 +		t2 = litmus_clock();
2896 +		local_irq_restore(flags);
2897 +		sum = (int)(t1 + t2);
2898 +		t2 -= t1;
2899 +		ret = put_user(t2, ts);
2900 +		v7_flush_kern_dcache_area(src, size*1024);
2901 +	}
2902 +	//preempt_enable();
2903 +	flush_cache(1);
2904 +
2905 +	return ret;
2906 +}
2907 +#endif
2908 +
2909 +asmlinkage long sys_lock_buffer(void* vaddr, size_t size, u32 lock_way, u32 unlock_way)
2910 +{
2911 +	/* size is in bytes */
2912 +	long ret = 0;
2914 +	u32 lock_val, unlock_val;
2915 +	
2916 +	lock_val = ~lock_way & 0x0000ffff;
2917 +	unlock_val = ~unlock_way & 0x0000ffff;
2918 +	color_read_in_mem_lock(lock_val, unlock_val, (void*)vaddr, (void*)vaddr + size);
2919 +	
2920 +	return ret;
2921 +}
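+
+/* Interpretation of the masks above: lock_way and unlock_way are bitmasks of
+ * cache ways.  While the buffer is read in, allocation is restricted to the
+ * ways set in lock_way (so the buffer ends up locked there); afterwards the
+ * lockdown register is left so that the ways in unlock_way are open.
+ * E.g. lock_way = 0x3 loads the buffer into ways 0 and 1. */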
2922 +
2923 +#define TRIALS 1000
2924 +
2925 +static int perf_test(void) {
2926 +	struct timespec before, after;
2927 +	struct page *page;
2928 +	void *vaddr;
2929 +	u32 *data;
2930 +	long time, flush_time;
2931 +	int i, num_pages = 1;
2932 +	unsigned int order = 4;
2933 +
2934 +	for (i = 0; i < order; i++) {
2935 +		num_pages = num_pages*2;
2936 +	}
2937 +
2938 +	printk("Number of pages: %d\n", num_pages);
2939 +	//page = alloc_page(__GFP_MOVABLE);
2940 +	page = alloc_pages(__GFP_MOVABLE, order);
2941 +	if (!page) {
2942 +		printk(KERN_WARNING "No memory\n");
2943 +		return -ENOMEM;
2944 +	}
2945 +
2946 +	vaddr = page_address(page);
2947 +	if (!vaddr)
2948 +		printk(KERN_WARNING "%s: vaddr is null\n", __FUNCTION__);
2949 +	data = (u32*) vaddr;
2950 +
2951 +	getnstimeofday(&before);
2952 +	barrier();
2953 +	for (i = 0; i < TRIALS; i++) {
2954 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2955 +	}
2956 +	barrier();
2957 +	getnstimeofday(&after);
2958 +	time = update_timeval(before, after);
2959 +	printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
2960 +	flush_time = time / TRIALS;
2961 +
2962 +	color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2963 +	
2964 +	barrier();
2965 +	getnstimeofday(&before);
2966 +	barrier();
2967 +	for (i = 0; i < TRIALS; i++) {
2968 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2969 +	}
2970 +	barrier();
2971 +	getnstimeofday(&after);
2972 +	time = update_timeval(before, after);
2973 +	printk("Average for read from cache: %ld\n", time / TRIALS);
2974 +
2975 +	getnstimeofday(&before);
2976 +	barrier();
2977 +	for (i = 0; i < TRIALS; i++) {
2978 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2979 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2980 +	}
2981 +	barrier();
2982 +	getnstimeofday(&after);
2983 +	time = update_timeval(before, after);
2984 +	printk("Average for read from mem: %ld (%ld)\n", time / TRIALS - flush_time, time / TRIALS);
2985 +
2986 +	// write in locked way
2987 +	color_read_in_mem_lock(nr_unlocked_way[2], LOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2988 +	for (i = 0; i < PAGE_SIZE*num_pages/sizeof(u32); i++) {
2989 +		data[i] = i%63353;
2990 +	}
2991 +	// read
2992 +	barrier();
2993 +	getnstimeofday(&before);
2994 +	barrier();
2995 +	for (i = 0; i < TRIALS; i++) {
2996 +		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2997 +	}
2998 +	barrier();
2999 +	getnstimeofday(&after);
3000 +	time = update_timeval(before, after);
3001 +	printk("Average for read in after write: %ld\n", time / TRIALS);
3002 +	
3003 +	
3004 +	//free_page((unsigned long)vaddr);
3005 +	free_pages((unsigned long)vaddr, order);
3006 +
3007 +	return 0;
3008 +}
3009 +
3010 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
3011 +		void __user *buffer, size_t *lenp, loff_t *ppos)
3012 +{
3013 +	int ret = 0;
3014 +
3015 +	if (write) {
3016 +		ret = perf_test();
3017 +	}
3018 +
3019 +	return ret;
3020 +}
3021 +
3022 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
3023 +		void __user *buffer, size_t *lenp, loff_t *ppos)
3024 +{
3025 +	int ret = -EINVAL;
3026 +
3027 +	if (write && flusher_pages == NULL) {
3028 +		ret = setup_flusher_array();
3029 +		printk(KERN_INFO "setup flusher return: %d\n", ret);
3030 +	
3031 +	}
3032 +	else if (flusher_pages) {
3033 +		printk(KERN_INFO "flusher_pages is already set!\n");
3034 +		ret = 0;
3035 +	}
3036 +	
3037 +	return ret;
3038 +}
3039 +
3040 +static struct ctl_table_header *litmus_sysctls;
3041 +
3042 +static int __init litmus_sysctl_init(void)
3043 +{
3044 +	int ret = 0;
3045 +
3046 +	printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n");
3047 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
3048 +	if (!litmus_sysctls) {
3049 +		printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n");
3050 +		ret = -EFAULT;
3051 +		goto out;
3052 +	}
3053 +
3054 +	way_partition_min = 0x00000000;
3055 +	way_partition_max = 0x0000FFFF;
3056 +	
3057 +out:
3058 +	return ret;
3059 +}
3060 +
3061 +module_init(litmus_sysctl_init);
3062 diff --git a/litmus/color_shm.c b/litmus/color_shm.c
3063 new file mode 100644
3064 index 0000000..d4913cd
3065 --- /dev/null
3066 +++ b/litmus/color_shm.c
3067 @@ -0,0 +1,402 @@
3068 +#include <linux/sched.h>
3069 +#include <linux/mm.h>
3070 +#include <linux/fs.h>
3071 +#include <linux/miscdevice.h>
3072 +#include <linux/spinlock.h>
3073 +#include <linux/module.h>
3074 +#include <linux/highmem.h>
3075 +#include <linux/slab.h>
3076 +#include <linux/mutex.h>
3077 +#include <asm/uaccess.h>
3078 +
3079 +#include <litmus/litmus.h>
3080 +
3081 +#define DEV_NAME	"litmus/color_shm"
3082 +
3083 +/* Major number assigned to our device. 
3084 + * Refer Documentation/devices.txt */
3085 +#define SHM_MAJOR			240
3086 +#define MAX_COLORED_PAGE	256
3087 +#define NUM_BANKS			8
3088 +#define NUM_COLORS			16
3089 +
3090 +static struct mutex dev_lock;
3091 +static int bypass_cache;
3092 +
3093 +struct color_ioctl_cmd {
3094 +	unsigned int color;
3095 +	unsigned int bank;
3096 +};
3097 +
3098 +struct color_ioctl_offset {
3099 +	unsigned long offset;
3100 +	int lock;
3101 +};
3102 +
3103 +#define SET_COLOR_SHM_CMD		_IOW(SHM_MAJOR, 0x1, struct color_ioctl_cmd)
3104 +#define SET_COLOR_SHM_OFFSET	_IOW(SHM_MAJOR, 0x2, struct color_ioctl_offset)
3105 +
3106 +struct color_ioctl_cmd color_param;
3107 +struct color_ioctl_offset color_offset;
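+
+/*
+ * User-space sketch (illustrative only; the /dev node name follows from
+ * DEV_NAME and misc_register(), and error handling is omitted):
+ *
+ *	struct color_ioctl_cmd cmd = { .color = 0x000f, .bank = 0x01 };
+ *	struct color_ioctl_offset off = { .offset = 0, .lock = 1 };
+ *	int fd = open("/dev/litmus/color_shm", O_RDWR);
+ *	ioctl(fd, SET_COLOR_SHM_CMD, &cmd);
+ *	ioctl(fd, SET_COLOR_SHM_OFFSET, &off);
+ *	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * The subsequent mmap() is backed by pages drawn from the banks and colors
+ * selected in cmd (see do_map_colored_pages() below).
+ */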
3108 +
3109 +static int mmap_common_checks(struct vm_area_struct *vma)
3110 +{
3111 +	/* you can only map the "first" page */
3112 +	if (vma->vm_pgoff != 0)
3113 +		return -EINVAL;
3114 +
3115 +	return 0;
3116 +}
3117 +
3118 +static void mmap_common_vma_flags(struct vm_area_struct *vma)
3119 +{
3120 +	/* This mapping should not be kept across forks,
3121 +	 * cannot be expanded, and is not a "normal" page. */
3122 +	//vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_SHARED | VM_MAYSHARE;
3123 +	vma->vm_flags |= VM_SHARED | VM_MAYSHARE | VM_LOCKED;
3124 +
3125 +	/* We don't want the first write access to trigger a "minor" page fault
3126 +	 * to mark the page as dirty.  This is transient, private memory, we
3127 +	 * don't care if it was touched or not. __S011 means RW access, but not
3128 +	 * execute, and avoids copy-on-write behavior.
3129 +	 * See protection_map in mmap.c.  */
3130 +	vma->vm_page_prot = PAGE_SHARED;
3131 +}
3132 +
3133 +#define vma_nr_pages(vma) \
3134 +	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
3135 +
3136 +extern struct page* get_colored_page(unsigned long color);
3137 +
3138 +static int do_map_colored_page(struct vm_area_struct *vma,
3139 +		const unsigned long addr,
3140 +		const unsigned long color_no)
3141 +{
3142 +	int err = 0;
3143 +	unsigned long offset = 2048;
3144 +	
3145 +	struct page *page = get_colored_page(color_no);
3146 +
3147 +	if (!page) {
3148 +		printk(KERN_INFO "Could not get page with color %lu.\n",
3149 +				color_no);
3150 +		err = -ENOMEM;
3151 +		goto out;
3152 +	}
3153 +
3154 +	printk(KERN_INFO "vma: %p  addr: 0x%lx  color_no: %lu\n",
3155 +			vma, addr, color_no);
3156 +	
3157 +	printk(KERN_INFO "vm_start: %lu vm_end: %lu\n",
3158 +			vma->vm_start, vma->vm_end);
3159 +
3160 +	printk(KERN_INFO "inserting page (pa: 0x%lx) at vaddr: 0x%lx  "
3161 +			"flags: 0x%lx  prot: 0x%lx\n",
3162 +			page_to_phys(page), addr,
3163 +			vma->vm_flags, pgprot_val(vma->vm_page_prot));
3164 +
3165 +	
3166 +	err = vm_insert_page(vma, addr, page);
3167 +	if (err) {
3168 +		printk(KERN_INFO "vm_insert_page() failed (%d)\n", err);
3169 +		err = -EINVAL;
3170 +		goto out;
3171 +	}
3172 +out:
3173 +	return err;
3174 +}
3175 +	
3176 +static int do_map_colored_pages(struct vm_area_struct *vma)
3177 +{
3178 +	const unsigned long nr_pages = vma_nr_pages(vma);
3179 +	unsigned long nr_mapped;
3180 +	int i, start_bank = -1, start_color = -1;
3181 +	int cur_bank = -1, cur_color = -1, err = 0;
3182 +	int colors[NUM_COLORS] = {0}, banks[NUM_BANKS] = {0};
3183 +
3184 +	if (bypass_cache == 1)
3185 +		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3186 +	
3187 +	for (i = 0; i < NUM_BANKS; i++) {
3188 +		if (((color_param.bank >> i)&0x1) == 1)
3189 +			banks[i] = 1;
3190 +	}
3191 +	
3192 +	for (i = 0; i < NUM_COLORS; i++) {
3193 +		if (((color_param.color >> i)&0x1) == 1)
3194 +			colors[i] = 1;
3195 +	}
3196 +	
3197 +	for (i = 0; i < NUM_BANKS; i++) {
3198 +		if (banks[i] == 1) {
3199 +			start_bank = i;
3200 +			break;
3201 +		}
3202 +	}
3203 +	for (i = 0; i < NUM_COLORS; i++) {
3204 +		if (colors[i] == 1) {
3205 +			start_color = i;
3206 +			break;
3207 +		}
3208 +	}	
3209 +		
3210 +	cur_bank = start_bank;
3211 +	cur_color = start_color;
3212 +	
3213 +	for (i = 0; i < NUM_BANKS; i++) {
3214 +		printk(KERN_INFO "BANK[%d] = %d\n", i, banks[i]);
3215 +	}
3216 +	printk(KERN_INFO "cur_bank = %d\n", cur_bank);
3217 +	for (i = 0; i < NUM_COLORS; i++) {
3218 +		printk(KERN_INFO "COLOR[%d] = %d\n", i, colors[i]);
3219 +	}
3220 +	printk(KERN_INFO "cur_color = %d\n", cur_color);
3221 +	
3222 +	
3223 +	TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
3224 +			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
3225 +	
3226 +	for (nr_mapped = 0; nr_mapped < nr_pages; nr_mapped++) {
3227 +		const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
3228 +		const unsigned long color_no = cur_bank*NUM_COLORS + cur_color;
3229 +		
3230 +		err = do_map_colored_page(vma, addr, color_no);
3231 +		printk(KERN_INFO "mapped bank[%d], color[%d], color_no = %lu at 0x%lx\n", 
3232 +			cur_bank, cur_color, color_no, addr);
3233 +		if (err) {
3234 +			TRACE_CUR("Could not map colored page set.\n");
3235 +			err = -EINVAL;
3236 +			goto out;
3237 +		}
3238 +		do {
3239 +			cur_color++;
3240 +		} while (cur_color < NUM_COLORS && colors[cur_color] == 0);
3241 +		
3242 +		if (cur_color >= NUM_COLORS) {
3243 +			do {
3244 +				cur_bank++;
3245 +			} while (cur_bank < NUM_BANKS && banks[cur_bank] == 0);
3246 +			cur_color = start_color;
3247 +		}
3248 +		
3249 +		if (cur_bank >= NUM_BANKS) {
3250 +			cur_bank = start_bank;
3251 +		}			
3252 +	}
3253 +	TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
3254 + out:
3255 +	return err;
3256 +}
3257 +
3258 +static int map_colored_pages(struct vm_area_struct *vma)
3259 +{
3260 +	int err = 0;
3261 +
3262 +	printk(KERN_INFO "User requests %lu pages.\n", vma_nr_pages(vma));
3263 +	if (MAX_COLORED_PAGE < vma_nr_pages(vma)) {
3264 +		TRACE_CUR("Max page request %d but want %lu.\n",
3265 +				MAX_COLORED_PAGE, vma_nr_pages(vma));
3266 +		err = -EINVAL;
3267 +		goto out;
3268 +	}
3269 +	err = do_map_colored_pages(vma);
3270 +out:
3271 +	return err;
3272 +}
3273 +
3274 +static void litmus_color_shm_vm_close(struct vm_area_struct *vma)
3275 +{
3276 +
3277 +	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
3278 +			vma->vm_flags, pgprot_val(vma->vm_page_prot));
3279 +
3280 +	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
3281 +			(void*) vma->vm_start, (void*) vma->vm_end, vma,
3282 +			vma->vm_private_data);
3283 +
3284 +}
3285 +
3286 +static int litmus_color_shm_vm_fault(struct vm_area_struct *vma,
3287 +		struct vm_fault *vmf)
3288 +{
3289 +	/* This function should never be called, since
3290 +	 * all pages should have been mapped by mmap()
3291 +	 * already. */
3292 +	TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
3293 +	printk(KERN_INFO "flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
3294 +
3295 +	printk(KERN_INFO "Page fault in color ctrl page! prot=0x%lx\n", pgprot_val(vma->vm_page_prot));
3296 +
3297 +	return VM_FAULT_SIGBUS;
3298 +}
3299 +
3300 +static struct vm_operations_struct litmus_color_shm_vm_ops = {
3301 +	.close	= litmus_color_shm_vm_close,
3302 +	.fault	= litmus_color_shm_vm_fault,
3303 +};
3304 +
3305 +static int litmus_color_shm_mmap(struct file *filp, struct vm_area_struct *vma)
3306 +{
3307 +	int err = 0;
3308 +
3309 +	printk(KERN_INFO "mmap called\n");
3310 +	
3311 +	if (color_param.color == 0x00000000 || color_param.bank == 0x00000000) {
3312 +		printk(KERN_INFO "color_info not set.\n");
3313 +		return -EINVAL;
3314 +	}
3315 +	if (color_offset.offset == 0xffffffff || color_offset.lock == -1) {
3316 +		printk(KERN_INFO "color_offset not set.\n");
3317 +		return -EINVAL;
3318 +	}
3319 +	
3320 +	err = mmap_common_checks(vma);
3321 +	if (err) {
3322 +		TRACE_CUR("failed mmap common checks\n");
3323 +		goto out;
3324 +	}
3325 +
3326 +	vma->vm_ops = &litmus_color_shm_vm_ops;
3327 +	mmap_common_vma_flags(vma);
3328 +
3329 +	err = map_colored_pages(vma);
3330 +
3331 +	TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
3332 +			pgprot_val(vma->vm_page_prot));
3333 +out:
3334 +	color_param.color = 0x00000000;
3335 +	color_param.bank = 0x00000000;
3336 +	color_offset.offset = 0xffffffff;
3337 +	color_offset.lock = -1;
3338 +	
3339 +	return err;
3340 +
3341 +}
3342 +
3343 +static long litmus_color_shm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3344 +{
3345 +	long err = -ENOIOCTLCMD;
3346 +	struct color_ioctl_cmd color_info;
3347 +	struct color_ioctl_offset color_off;
3348 +				
3349 +	printk(KERN_INFO "color_shm ioctl\n");
3350 +	
3351 +	if (_IOC_TYPE(cmd) != SHM_MAJOR)
3352 +		return -ENOTTY;
3353 +	
3354 +	
3355 +	switch (cmd) {
3356 +		case SET_COLOR_SHM_CMD:
3357 +			
3358 +			if (copy_from_user(&color_info, (void*)arg, sizeof(struct color_ioctl_cmd)))
3359 +				return -EFAULT;
3360 +			color_param.color = color_info.color;
3361 +			color_param.bank = color_info.bank;
3362 +			printk(KERN_INFO "COLOR = %x\n", color_param.color);
3363 +			printk(KERN_INFO "BANK  = %x\n", color_param.bank);
3364 +			err = 0;
3365 +			break;
3366 +		case SET_COLOR_SHM_OFFSET:
3367 +			if (copy_from_user(&color_off, (void*)arg, sizeof(struct color_ioctl_offset)))
3368 +				return -EFAULT;
3369 +			color_offset.offset = color_off.offset;
3370 +			color_offset.lock = color_off.lock;
3371 +			printk(KERN_INFO "OFFSET = %lx\n", color_offset.offset);
3372 +			printk(KERN_INFO "LOCK   = %d\n", color_offset.lock);
3373 +			err = 0;
3374 +			break;
3375 +			
3376 +		default:
3377 +			printk(KERN_INFO "Invalid IOCTL CMD\n");
3378 +			err = -EINVAL;
3379 +	}
3380 +
3381 +	return err;
3382 +}
3383 +
3384 +static struct file_operations litmus_color_shm_fops = {
3385 +	.owner	= THIS_MODULE,
3386 +	.mmap	= litmus_color_shm_mmap,
3387 +	.unlocked_ioctl	= litmus_color_shm_ioctl,
3388 +};
3389 +
3390 +static struct miscdevice litmus_color_shm_dev = {
3391 +	.name	= DEV_NAME,
3392 +	.minor	= MISC_DYNAMIC_MINOR,
3393 +	.fops	= &litmus_color_shm_fops,
3394 +};
3395 +
3396 +struct mutex bypass_mutex;
3397 +
3398 +int bypass_proc_handler(struct ctl_table *table, int write,
3399 +		void __user *buffer, size_t *lenp, loff_t *ppos)
3400 +{
3401 +	int ret;
3402 +
3403 +	mutex_lock(&bypass_mutex);
3404 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
3405 +	printk(KERN_INFO "shm_bypass = %d\n", bypass_cache);
3406 +	mutex_unlock(&bypass_mutex);
3407 +	
3408 +	return ret;
3409 +}
3410 +
3411 +static int zero = 0;
3412 +static int one = 1;
3413 +
3414 +static struct ctl_table cache_table[] =
3415 +{
3416 +	{
3417 +		.procname	= "shm_bypass",
3418 +		.mode		= 0666,
3419 +		.proc_handler	= bypass_proc_handler,
3420 +		.data		= &bypass_cache,
3421 +		.maxlen		= sizeof(bypass_cache),
3422 +		.extra1		= &zero,
3423 +		.extra2		= &one,
3424 +	},	
3425 +	{ }
3426 +};
3427 +
3428 +static struct ctl_table litmus_dir_table[] = {
3429 +	{
3430 +		.procname	= "litmus",
3431 + 		.mode		= 0555,
3432 +		.child		= cache_table,
3433 +	},
3434 +	{ }
3435 +};
3436 +
3437 +static struct ctl_table_header *litmus_sysctls;
3438 +
3439 +static int __init init_color_shm_devices(void)
3440 +{
3441 +	int err;
3442 +
3443 +	printk(KERN_INFO "Registering LITMUS^RT color_shm devices.\n");
3444 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
3445 +	if (!litmus_sysctls) {
3446 +		printk(KERN_WARNING "Could not register LITMUS^RT color_shm sysctl.\n");
3447 +		err = -EFAULT;
3448 +	}
3449 +	
3450 +	mutex_init(&dev_lock);
3451 +	mutex_init(&bypass_mutex);
3452 +	color_param.color = 0x00000000;
3453 +	color_param.bank = 0x00000000;
3454 +	color_offset.offset = 0xffffffff;
3455 +	color_offset.lock = -1;
3456 +	bypass_cache = 0;
3457 +	err = misc_register(&litmus_color_shm_dev);
3458 +	
3459 +	return err;
3460 +}
3461 +
3462 +static void __exit exit_color_shm_devices(void)
3463 +{
3464 +	misc_deregister(&litmus_color_shm_dev);
3465 +	printk(KERN_INFO "Unregistering %s device.\n", DEV_NAME);
3466 +}
3467 +
3468 +module_init(init_color_shm_devices);
3469 +module_exit(exit_color_shm_devices);
3470 \ No newline at end of file
3471 diff --git a/litmus/litmus.c b/litmus/litmus.c
3472 index db5ce0e9..7fbabce 100644
3473 --- a/litmus/litmus.c
3474 +++ b/litmus/litmus.c
3475 @@ -14,6 +14,10 @@
3476  #include <linux/sched/rt.h>
3477  #include <linux/rwsem.h>
3478  #include <linux/interrupt.h>
3479 +#include <linux/migrate.h>
3480 +#include <linux/mm.h>
3481 +#include <linux/memcontrol.h>
3482 +#include <linux/mm_inline.h>
3483  
3484  #include <litmus/litmus.h>
3485  #include <litmus/bheap.h>
3486 @@ -21,6 +25,9 @@
3487  #include <litmus/rt_domain.h>
3488  #include <litmus/litmus_proc.h>
3489  #include <litmus/sched_trace.h>
3490 +#include <litmus/cache_proc.h>
3491 +#include <litmus/mc2_common.h>
3492 +#include <litmus/replicate_lib.h>
3493  
3494  #ifdef CONFIG_SCHED_CPU_AFFINITY
3495  #include <litmus/affinity.h>
3496 @@ -31,6 +38,8 @@
3497  #include <trace/events/litmus.h>
3498  #endif
3499  
3500 +extern void l2c310_flush_all(void);
3501 +
3502  /* Number of RT tasks that exist in the system */
3503  atomic_t rt_task_count 		= ATOMIC_INIT(0);
3504  
3505 @@ -160,6 +169,14 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
3506  		       pid, tp.budget_policy);
3507  		goto out_unlock;
3508  	}
3509 +#ifdef CONFIG_PGMRT_SUPPORT
3510 +	if (tp.pgm_type < PGM_NOT_A_NODE || tp.pgm_type > PGM_INTERNAL) {
3511 +		printk(KERN_INFO "litmus: real-time task %d rejected "
3512 +				"because of unknown PGM node type specified (%d)\n",
3513 +				pid, tp.pgm_type);
3514 +		goto out_unlock;
3515 +	}
3516 +#endif
3517  
3518  	target->rt_param.task_params = tp;
3519  
3520 @@ -314,6 +331,248 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
3521  	return ret;
3522  }
3523  
3524 +asmlinkage long sys_reservation_create(int type, void __user *config)
3525 +{
3526 +    return litmus->reservation_create(type, config);
3527 +}
3528 +
3529 +asmlinkage long sys_reservation_destroy(unsigned int reservation_id, int cpu)
3530 +{
3531 +    return litmus->reservation_destroy(reservation_id, cpu);
3532 +}
3533 +
3534 +static unsigned long color_mask;
3535 +
3536 +static inline unsigned long page_color(struct page *page)
3537 +{
3538 +    return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT);
3539 +}
3540 +
3541 +extern int isolate_lru_page(struct page *page);
3542 +extern void putback_movable_page(struct page *page);
3543 +extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
3544 +
3545 +#define INVALID_PFN				(0xffffffff)
3546 +LIST_HEAD(shared_lib_pages);
3547 +
3548 +EXPORT_SYMBOL(shared_lib_pages);
3549 +
3550 +/* Reallocate pages of a task 
3551 + * Private pages - Migrate to a new page.
3552 + * Shared pages - Use a replica. Make a replica if necessary.
3553 + * @cpu : CPU id of the calling task
3554 + * returns the number of pages that is not moved.
3555 + */
3556 +asmlinkage long sys_set_page_color(int cpu)
3557 +{
3558 +	long ret = 0;
3559 +	struct vm_area_struct *vma_itr = NULL;
3560 +	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0;
3561 +	unsigned long node;
3562 +	enum crit_level lv;
3563 +	struct mm_struct *mm;
3564 +		
3565 +	LIST_HEAD(pagelist);
3566 +	LIST_HEAD(task_shared_pagelist);
3567 +
3568 +	migrate_prep();
3569 +	
3570 +	/* Find the current mm_struct */
3571 +	rcu_read_lock();
3572 +	get_task_struct(current);
3573 +	rcu_read_unlock();
3574 +	mm = get_task_mm(current);
3575 +	put_task_struct(current);
3576 +
3577 +	down_read(&mm->mmap_sem);
3578 +
3579 +	vma_itr = mm->mmap;
3580 +	/* Iterate all vm_area_struct */
3581 +	while (vma_itr != NULL) {
3582 +		unsigned int num_pages = 0, i;
3583 +		struct page *old_page = NULL;
3584 +		int pages_in_vma = 0;
3585 +		
3586 +		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
3587 +		/* Traverse all pages in vm_area_struct */
3588 +		for (i = 0; i < num_pages; i++) {
3589 +			old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
3590 +			
3591 +			if (IS_ERR(old_page))
3592 +				continue;
3593 +			if (!old_page)
3594 +				continue;
3595 +
3596 +			if (PageReserved(old_page)) {
3597 +				TRACE("Reserved Page!\n");
3598 +				put_page(old_page);
3599 +				continue;
3600 +			}
3601 +			
3602 +			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
3603 +			pages_in_vma++;
3604 +
3605 +			/* Conditions for replicable pages */
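     +			/* i.e., file-backed, read-only, and mapped more than once:
     +			 * typically shared library text or read-only data */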
3606 +			if (page_count(old_page) > 2 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
3607 +				struct shared_lib_page *lib_page;
3608 +				int is_exist = 0;
3609 +
3610 +				/* Update PSL (Per-core shared library (master)) list */
3611 +				/* Check if this page is in the PSL list */
3612 +				rcu_read_lock();
3613 +				list_for_each_entry(lib_page, &shared_lib_pages, list)
3614 +				{
3615 +					if (page_to_pfn(old_page) == lib_page->master_pfn) {
3616 +						is_exist = 1;
3617 +						break;
3618 +					}
3619 +				}
3620 +				rcu_read_unlock();
3621 +	
3622 +				if (is_exist == 0) {
3623 +					int cpu_i;
3624 +					lib_page = kmalloc(sizeof(struct shared_lib_page), GFP_KERNEL);
3625 +					lib_page->master_page = old_page;
3626 +					lib_page->master_pfn = page_to_pfn(old_page);
3627 +					for (cpu_i = 0; cpu_i < NR_CPUS+1; cpu_i++) {
3628 +						lib_page->r_page[cpu_i] = NULL;
3629 +						lib_page->r_pfn[cpu_i] = INVALID_PFN;
3630 +					}
3631 +					list_add_tail(&lib_page->list, &shared_lib_pages);
3632 +				}
3633 +				
3634 +				/* add to task_shared_pagelist */
3635 +				ret = isolate_lru_page(old_page);
3636 +				if (!ret) {
3637 +					list_add_tail(&old_page->lru, &task_shared_pagelist);
3638 +					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
3639 +					nr_shared_pages++;
3640 +				} else {
3641 +					TRACE_TASK(current, "isolate_lru_page for a shared page failed\n");
3642 +					nr_failed++;
3643 +				}
3644 +				put_page(old_page);
3645 +			}
3646 +			else {
3647 +				ret = isolate_lru_page(old_page);
3648 +				if (!ret) {
3649 +					list_add_tail(&old_page->lru, &pagelist);
3650 +					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
3651 +					nr_pages++;
3652 +				} else {
3653 +					TRACE_TASK(current, "isolate_lru_page for a private page failed\n");
3654 +					nr_failed++;
3655 +				}
3656 +				put_page(old_page);
3657 +			}
3658 +		}
3659 +		TRACE_TASK(current, "PAGES_IN_VMA = %d size = %d KB\n", pages_in_vma, pages_in_vma*4);
3660 +		vma_itr = vma_itr->vm_next;
3661 +	}
3662 +	
3663 +	ret = 0;
3664 +	lv = tsk_rt(current)->mc2_data->crit;
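     +	/* node selects the page pool passed to new_alloc_page(): cpu*2 + lv gives a
     +	 * per-CPU, per-criticality pool; 8 is presumably the shared pool used when
     +	 * no CPU is specified. */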
3665 +	if (cpu == -1)
3666 +		node = 8;
3667 +	else
3668 +		node = cpu*2 + lv;
3669 +
3670 +	/* Migrate private pages */
3671 +	if (!list_empty(&pagelist)) {
3672 +		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
3673 +		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
3674 +		nr_not_migrated = ret;
3675 +		if (ret) {
3676 +			putback_movable_pages(&pagelist);
3677 +		}
3678 +	}
3679 +
3680 +	/* Replicate shared pages */
3681 +	if (!list_empty(&task_shared_pagelist)) {
3682 +		ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
3683 +		TRACE_TASK(current, "%ld shared pages not migrated.\n", ret);
3684 +		nr_not_migrated += ret;
3685 +		if (ret) {
3686 +			putback_movable_pages(&task_shared_pagelist);
3687 +		}
3688 +	}
3689 +
3690 +	up_read(&mm->mmap_sem);
3691 +
3692 +	TRACE_TASK(current, "nr_pages = %d nr_failed = %d nr_not_migrated = %d\n", nr_pages, nr_failed, nr_not_migrated);
3693 +	printk(KERN_INFO "node = %ld, nr_private_pages = %d, nr_shared_pages = %d, nr_failed_to_isolate_lru = %d, nr_not_migrated = %d\n", node, nr_pages, nr_shared_pages, nr_failed, nr_not_migrated);
3694 +
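     +	/* flush caches after migration, presumably so that no stale cache lines
     +	 * refer to the old (pre-migration) page frames */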
3695 +	flush_cache(1);
3696 +	
3697 +	return nr_not_migrated;
3698 +}
3699 +
3700 +/* sys_test_call() is a test system call for debugging */
3701 +asmlinkage long sys_test_call(unsigned int param)
3702 +{
3703 +	long ret = 0;
3704 +	struct vm_area_struct *vma_itr = NULL;
3705 +	
3706 +	TRACE_CUR("test_call param = %d\n", param);
3707 +	
3708 +	/* if param == 0,
3709 +	 * show vm regions and the page frame numbers
3710 +	 * associated with each vm region.
3711 +	 * if param == 1,
3712 +	 * print the master (shared library) list.
3713 +	 */
3714 +	if (param == 0) {
3715 +		down_read(&current->mm->mmap_sem);
3716 +		vma_itr = current->mm->mmap;
3717 +		while (vma_itr != NULL) {
3718 +			int i, num_pages;
3719 +			struct page* old_page;
3720 +			TRACE_TASK(current, "------------------------------------------------------\n");
3721 +			TRACE_TASK(current, "vm_start : %lx\n", vma_itr->vm_start);
3722 +			TRACE_TASK(current, "vm_end   : %lx\n", vma_itr->vm_end);
3723 +			TRACE_TASK(current, "vm_flags : %lx\n", vma_itr->vm_flags);
3724 +			TRACE_TASK(current, "vm_prot  : %x\n", pgprot_val(vma_itr->vm_page_prot));
3725 +			TRACE_TASK(current, "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
3726 +	
3727 +			num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
3728 +			for (i = 0; i < num_pages; i++) {
3729 +				old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
3730 +				
3731 +				if (IS_ERR(old_page))
3732 +					continue;
3733 +				if (!old_page)
3734 +					continue;
3735 +
3736 +				if (PageReserved(old_page)) {
3737 +					TRACE("Reserved Page!\n");
3738 +					put_page(old_page);
3739 +					continue;
3740 +				}
3741 +				
3742 +				TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping));
3743 +				put_page(old_page);
3744 +			}
3745 +			vma_itr = vma_itr->vm_next;
3746 +		}
3747 +		TRACE_TASK(current, "------------------------------------------------------\n");
3748 +		up_read(&current->mm->mmap_sem);
3749 +	} else if (param == 1) {
3750 +		TRACE_TASK(current, "Shared pages and replicas.\n");
3751 +		{
3752 +			struct shared_lib_page *lpage;
3753 +
3754 +			rcu_read_lock();
3755 +			list_for_each_entry(lpage, &shared_lib_pages, list)
3756 +			{
3757 +				TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3], lpage->r_pfn[4]);
3758 +			}
3759 +			rcu_read_unlock();
3760 +		}
3761 +	}
3762 +	
3763 +	return ret;
3764 +}
3765 +
3766  /* p is a real-time task. Re-init its state as a best-effort task. */
3767  static void reinit_litmus_state(struct task_struct* p, int restore)
3768  {
3769 @@ -651,6 +910,12 @@ static int __init _init_litmus(void)
3770  	 *      mode change lock is used to enforce single mode change
3771  	 *      operation.
3772  	 */
3773 +#if defined(CONFIG_CPU_V7)
3774 +	unsigned int line_size_log = 5; // 2^5 = 32 bytes
3775 +	unsigned int cache_info_sets = 2048; // 64KB (way_size) / 32B (line_size) = 2048 sets
3776 +	printk("LITMUS^RT-ARM kernel\n");
3777 +#endif
3778 +
3779  	printk("Starting LITMUS^RT kernel\n");
3780  
3781  	register_sched_plugin(&linux_sched_plugin);
3782 @@ -665,11 +930,15 @@ static int __init _init_litmus(void)
3783  	else
3784  		printk("Could not register kill rt tasks magic sysrq.\n");
3785  #endif
3786 -
3787  	init_litmus_proc();
3788  
3789  	register_reboot_notifier(&shutdown_notifier);
3790  
3791 +#if defined(CONFIG_CPU_V7)
3792 +	color_mask = ((cache_info_sets << line_size_log) - 1) ^ (PAGE_SIZE - 1);
3793 +	printk("Page color mask %lx\n", color_mask);
3794 +#endif
3795 +
3796  	return 0;
3797  }
3798  
3799 diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
3800 new file mode 100644
3801 index 0000000..a8ea5d9
3802 --- /dev/null
3803 +++ b/litmus/mc2_common.c
3804 @@ -0,0 +1,78 @@
3805 +/*
3806 + * litmus/mc2_common.c
3807 + *
3808 + * Common functions for MC2 plugin.
3809 + */
3810 +
3811 +#include <linux/percpu.h>
3812 +#include <linux/sched.h>
3813 +#include <linux/list.h>
3814 +#include <linux/slab.h>
3815 +#include <asm/uaccess.h>
3816 +
3817 +#include <litmus/litmus.h>
3818 +#include <litmus/sched_plugin.h>
3819 +#include <litmus/sched_trace.h>
3820 +
3821 +#include <litmus/mc2_common.h>
3822 +
3823 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, struct reservation *res)
3824 +{
3825 +	task_client_init(tc, tsk, res);
3826 +	if ((mc2_param->crit < CRIT_LEVEL_A) ||
3827 +		(mc2_param->crit > CRIT_LEVEL_C))
3828 +		return -EINVAL;
3829 +	
3830 +	TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n", mc2_param->crit);
3831 +	
3832 +	return 0;
3833 +}
3834 +
3835 +asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
3836 +{
3837 +	struct task_struct *target;
3838 +	int retval = -EINVAL;
3839 +	struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
3840 +	
3841 +	if (!mp)
3842 +		return -ENOMEM;
3843 +
3844 +	printk("Setting up mc^2 task parameters for process %d.\n", pid);
3845 +
3846 +	if (pid < 0 || param == 0) {
3847 +		goto out;
3848 +	}
3849 +	if (copy_from_user(mp, param, sizeof(*mp))) {
3850 +		retval = -EFAULT;
3851 +		goto out;
3852 +	}
3853 +
3854 +	/* Task search and manipulation must be protected */
3855 +	read_lock_irq(&tasklist_lock);
3856 +	if (!(target = find_task_by_vpid(pid))) {
3857 +		retval = -ESRCH;
3858 +		goto out_unlock;
3859 +	}
3860 +
3861 +	if (is_realtime(target)) {
3862 +		/* The task is already a real-time task.
3863 +		 * We cannot allow parameter changes at this point.
3864 +		 */
3865 +		retval = -EBUSY;
3866 +		goto out_unlock;
3867 +	}
3868 +	if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
3869 +		printk(KERN_INFO "litmus: real-time task %d rejected "
3870 +			"because of invalid criticality level\n", pid);
3871 +		goto out_unlock;
3872 +	}
3873 +	
3874 +	//target->rt_param.plugin_state = mp;
3875 +	target->rt_param.mc2_data = mp;
3876 +
3877 +	retval = 0;
3878 +out_unlock:
3879 +	read_unlock_irq(&tasklist_lock);
3880 +out:
3881 +	return retval;
3882 +}
3883 \ No newline at end of file
3884 diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
3885 new file mode 100644
3886 index 0000000..d44a403
3887 --- /dev/null
3888 +++ b/litmus/polling_reservations.c
3889 @@ -0,0 +1,563 @@
3890 +#include <linux/sched.h>
3891 +
3892 +#include <litmus/litmus.h>
3893 +#include <litmus/reservation.h>
3894 +#include <litmus/polling_reservations.h>
3895 +
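     +/* Client arrival for a periodic polling reservation: queue the client and, if
     + * the reservation was inactive, compute the next replenishment instant from the
     + * reservation's period and offset relative to the environment's time_zero. */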
3896 +static void periodic_polling_client_arrives(
3897 +	struct reservation* res,
3898 +	struct reservation_client *client
3899 +)
3900 +{
3901 +	struct polling_reservation *pres =
3902 +		container_of(res, struct polling_reservation, res);
3903 +	lt_t instances, tmp;
3904 +
3905 +	list_add_tail(&client->list, &res->clients);
3906 +
3907 +	switch (res->state) {
3908 +		case RESERVATION_INACTIVE:
3909 +			/* Figure out next replenishment time. */
3910 +			if (res->env->time_zero == 0) {
3911 +				tmp = res->env->current_time - res->env->time_zero;
3912 +				instances =  div64_u64(tmp, pres->period);
3913 +				res->next_replenishment =
3914 +					(instances + 1) * pres->period + pres->offset;
3915 +			}
3916 +			else {
3917 +				tmp = res->env->current_time - res->env->time_zero;
3918 +				instances =  div64_u64(tmp, pres->period);
3919 +				res->next_replenishment = res->env->time_zero + instances * pres->period;
3920 +			}
3921 +				
3922 +			TRACE("ENV_TIME_ZERO %llu\n", res->env->time_zero);
3923 +			TRACE("pol-res: R%d activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
3924 +				res->id, tmp, instances, pres->period, res->next_replenishment,
3925 +				res->env->current_time);
3926 +
3927 +			res->env->change_state(res->env, res,
3928 +				RESERVATION_DEPLETED);
3929 +			break;
3930 +
3931 +		case RESERVATION_ACTIVE:
3932 +		case RESERVATION_DEPLETED:
3933 +			/* do nothing */
3934 +			break;
3935 +
3936 +		case RESERVATION_ACTIVE_IDLE:
3937 +			res->blocked_by_ghost = 0;
3938 +			res->env->change_state(res->env, res,
3939 +				RESERVATION_ACTIVE);
3940 +			break;
3941 +	}
3942 +}
3943 +
3944 +
3945 +static void periodic_polling_client_departs(
3946 +	struct reservation *res,
3947 +	struct reservation_client *client,
3948 +	int did_signal_job_completion
3949 +)
3950 +{
3951 +	list_del(&client->list);
3952 +
3953 +	switch (res->state) {
3954 +		case RESERVATION_INACTIVE:
3955 +		case RESERVATION_ACTIVE_IDLE:
3956 +			BUG(); /* INACTIVE or IDLE <=> no client */
3957 +			break;
3958 +
3959 +		case RESERVATION_ACTIVE:
3960 +			if (list_empty(&res->clients)) {
3961 +				res->env->change_state(res->env, res,
3962 +//						RESERVATION_ACTIVE_IDLE);
3963 +					res->cur_budget ?
3964 +						RESERVATION_ACTIVE_IDLE :
3965 +						RESERVATION_DEPLETED);
3966 +//					did_signal_job_completion ?
3967 +//						RESERVATION_DEPLETED :
3968 +//						RESERVATION_ACTIVE_IDLE);
3969 +			} /* else: nothing to do, more clients ready */
3970 +			break;
3971 +
3972 +		case RESERVATION_DEPLETED:
3973 +			/* do nothing */
3974 +			break;
3975 +	}
3976 +}
3977 +
3978 +static void periodic_polling_on_replenishment(
3979 +	struct reservation *res
3980 +)
3981 +{
3982 +	struct polling_reservation *pres =
3983 +		container_of(res, struct polling_reservation, res);
3984 +
3985 +	/* replenish budget */
3986 +	res->cur_budget = pres->max_budget;
3987 +	res->next_replenishment += pres->period;
3988 +	res->budget_consumed = 0;
3989 +
3990 +	TRACE("polling_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
3991 +	switch (res->state) {
3992 +		case RESERVATION_DEPLETED:
3993 +		case RESERVATION_INACTIVE:
3994 +		case RESERVATION_ACTIVE_IDLE:
3995 +			if (list_empty(&res->clients))
3996 +				/* no clients => poll again later */
3997 +				res->env->change_state(res->env, res,
3998 +					RESERVATION_INACTIVE);
3999 +			else
4000 +				/* we have clients & budget => ACTIVE */
4001 +				res->env->change_state(res->env, res,
4002 +					RESERVATION_ACTIVE);
4003 +			break;
4004 +
4005 +		case RESERVATION_ACTIVE:
4006 +			/* Replenished while active => tardy? In any case,
4007 +			 * go ahead and stay active. */
4008 +			break;
4009 +	}
4010 +}
4011 +
4012 +static void periodic_polling_on_replenishment_edf(
4013 +	struct reservation *res
4014 +)
4015 +{
4016 +	struct polling_reservation *pres =
4017 +		container_of(res, struct polling_reservation, res);
4018 +
4019 +	/* update current priority */
4020 +	res->priority = res->next_replenishment + pres->deadline;
4021 +
4022 +	/* do common updates */
4023 +	periodic_polling_on_replenishment(res);
4024 +}
4025 +
4026 +static void common_drain_budget(
4027 +		struct reservation *res,
4028 +		lt_t how_much)
4029 +{
4030 +	if (how_much >= res->cur_budget)
4031 +		res->cur_budget = 0;
4032 +	else
4033 +		res->cur_budget -= how_much;
4034 +
4035 +	res->budget_consumed += how_much;
4036 +	res->budget_consumed_total += how_much;
4037 +
4038 +	switch (res->state) {
4039 +		case RESERVATION_DEPLETED:
4040 +		case RESERVATION_INACTIVE:
4041 +			//BUG();
4042 +			TRACE("!!!!!!!!!!!!!!!STATE ERROR R%d STATE(%d)\n", res->id, res->state);
4043 +			break;
4044 +
4045 +		case RESERVATION_ACTIVE_IDLE:
4046 +		case RESERVATION_ACTIVE:
4047 +			if (!res->cur_budget) {
4048 +				res->env->change_state(res->env, res,
4049 +					RESERVATION_DEPLETED);
4050 +			} /* else: stay in current state */
4051 +			break;
4052 +	}
4053 +}
4054 +
4055 +static struct reservation_ops periodic_polling_ops_fp = {
4056 +	.dispatch_client = default_dispatch_client,
4057 +	.client_arrives = periodic_polling_client_arrives,
4058 +	.client_departs = periodic_polling_client_departs,
4059 +	.replenish = periodic_polling_on_replenishment,
4060 +	.drain_budget = common_drain_budget,
4061 +};
4062 +
4063 +static struct reservation_ops periodic_polling_ops_edf = {
4064 +	.dispatch_client = default_dispatch_client,
4065 +	.client_arrives = periodic_polling_client_arrives,
4066 +	.client_departs = periodic_polling_client_departs,
4067 +	.replenish = periodic_polling_on_replenishment_edf,
4068 +	.drain_budget = common_drain_budget,
4069 +};
4070 +
4071 +
4072 +
4073 +
4074 +static void sporadic_polling_client_arrives_fp(
4075 +	struct reservation* res,
4076 +	struct reservation_client *client
4077 +)
4078 +{
4079 +	struct polling_reservation *pres =
4080 +		container_of(res, struct polling_reservation, res);
4081 +
4082 +	list_add_tail(&client->list, &res->clients);
4083 +
4084 +	switch (res->state) {
4085 +		case RESERVATION_INACTIVE:
4086 +			/* Replenish now. */
4087 +			res->cur_budget = pres->max_budget;
4088 +			res->next_replenishment =
4089 +				res->env->current_time + pres->period;
4090 +
4091 +			res->env->change_state(res->env, res,
4092 +				RESERVATION_ACTIVE);
4093 +			break;
4094 +
4095 +		case RESERVATION_ACTIVE:
4096 +		case RESERVATION_DEPLETED:
4097 +			/* do nothing */
4098 +			break;
4099 +
4100 +		case RESERVATION_ACTIVE_IDLE:
4101 +			res->env->change_state(res->env, res,
4102 +				RESERVATION_ACTIVE);
4103 +			break;
4104 +	}
4105 +}
4106 +
4107 +static void sporadic_polling_client_arrives_edf(
4108 +	struct reservation* res,
4109 +	struct reservation_client *client
4110 +)
4111 +{
4112 +	struct polling_reservation *pres =
4113 +		container_of(res, struct polling_reservation, res);
4114 +
4115 +	list_add_tail(&client->list, &res->clients);
4116 +
4117 +	switch (res->state) {
4118 +		case RESERVATION_INACTIVE:
4119 +			/* Replenish now. */
4120 +			res->cur_budget = pres->max_budget;
4121 +			res->next_replenishment =
4122 +				res->env->current_time + pres->period;
4123 +			res->priority =
4124 +				res->env->current_time + pres->deadline;
4125 +
4126 +			res->env->change_state(res->env, res,
4127 +				RESERVATION_ACTIVE);
4128 +			break;
4129 +
4130 +		case RESERVATION_ACTIVE:
4131 +		case RESERVATION_DEPLETED:
4132 +			/* do nothing */
4133 +			break;
4134 +
4135 +		case RESERVATION_ACTIVE_IDLE:
4136 +			res->env->change_state(res->env, res,
4137 +				RESERVATION_ACTIVE);
4138 +			break;
4139 +	}
4140 +}
4141 +
4142 +static struct reservation_ops sporadic_polling_ops_fp = {
4143 +	.dispatch_client = default_dispatch_client,
4144 +	.client_arrives = sporadic_polling_client_arrives_fp,
4145 +	.client_departs = periodic_polling_client_departs,
4146 +	.replenish = periodic_polling_on_replenishment,
4147 +	.drain_budget = common_drain_budget,
4148 +};
4149 +
4150 +static struct reservation_ops sporadic_polling_ops_edf = {
4151 +	.dispatch_client = default_dispatch_client,
4152 +	.client_arrives = sporadic_polling_client_arrives_edf,
4153 +	.client_departs = periodic_polling_client_departs,
4154 +	.replenish = periodic_polling_on_replenishment_edf,
4155 +	.drain_budget = common_drain_budget,
4156 +};
4157 +
4158 +void polling_reservation_init(
4159 +	struct polling_reservation *pres,
4160 +	int use_edf_prio,
4161 +	int use_periodic_polling,
4162 +	lt_t budget, lt_t period, lt_t deadline, lt_t offset
4163 +)
4164 +{
4165 +	if (!deadline)
4166 +		deadline = period;
4167 +	BUG_ON(budget > period);
4168 +	BUG_ON(budget > deadline);
4169 +	BUG_ON(offset >= period);
4170 +
4171 +	reservation_init(&pres->res);
4172 +	pres->max_budget = budget;
4173 +	pres->period = period;
4174 +	pres->deadline = deadline;
4175 +	pres->offset = offset;
4176 +	TRACE_TASK(current, "polling_reservation_init: periodic %d, use_edf %d\n", use_periodic_polling, use_edf_prio);
4177 +	if (use_periodic_polling) {
4178 +		if (use_edf_prio)
4179 +			pres->res.ops = &periodic_polling_ops_edf;
4180 +		else
4181 +			pres->res.ops = &periodic_polling_ops_fp;
4182 +	} else {
4183 +		if (use_edf_prio)
4184 +			pres->res.ops = &sporadic_polling_ops_edf;
4185 +		else
4186 +			pres->res.ops = &sporadic_polling_ops_fp;
4187 +	}
4188 +}
4189 +
4190 +
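     +/* Helpers: start time of the current/next major cycle of a table-driven
     + * reservation, measured relative to the environment's time_zero. */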
4191 +static lt_t td_cur_major_cycle_start(struct table_driven_reservation *tdres)
4192 +{
4193 +	lt_t x, tmp;
4194 +
4195 +	tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
4196 +	x = div64_u64(tmp, tdres->major_cycle);
4197 +	x *= tdres->major_cycle;
4198 +	return x;
4199 +}
4200 +
4201 +
4202 +static lt_t td_next_major_cycle_start(struct table_driven_reservation *tdres)
4203 +{
4204 +	lt_t x, tmp;
4205 +
4206 +	tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
4207 +	x = div64_u64(tmp, tdres->major_cycle) + 1;
4208 +	x *= tdres->major_cycle;
4209 +	return x;
4210 +}
4211 +
4212 +static void td_client_arrives(
4213 +	struct reservation* res,
4214 +	struct reservation_client *client
4215 +)
4216 +{
4217 +	struct table_driven_reservation *tdres =
4218 +		container_of(res, struct table_driven_reservation, res);
4219 +
4220 +	list_add_tail(&client->list, &res->clients);
4221 +
4222 +	switch (res->state) {
4223 +		case RESERVATION_INACTIVE:
4224 +			/* Figure out first replenishment time. */
4225 +			tdres->major_cycle_start = td_next_major_cycle_start(tdres);
4226 +			res->next_replenishment  = tdres->major_cycle_start;
4227 +			res->next_replenishment += tdres->intervals[0].start;
4228 +			tdres->next_interval = 0;
4229 +
4230 +			res->env->change_state(res->env, res,
4231 +				RESERVATION_DEPLETED);
4232 +			break;
4233 +
4234 +		case RESERVATION_ACTIVE:
4235 +		case RESERVATION_DEPLETED:
4236 +			/* do nothing */
4237 +			break;
4238 +
4239 +		case RESERVATION_ACTIVE_IDLE:
4240 +			res->env->change_state(res->env, res,
4241 +				RESERVATION_ACTIVE);
4242 +			break;
4243 +	}
4244 +}
4245 +
4246 +static void td_client_departs(
4247 +	struct reservation *res,
4248 +	struct reservation_client *client,
4249 +	int did_signal_job_completion
4250 +)
4251 +{
4252 +	list_del(&client->list);
4253 +
4254 +	switch (res->state) {
4255 +		case RESERVATION_INACTIVE:
4256 +		case RESERVATION_ACTIVE_IDLE:
4257 +			//BUG(); /* INACTIVE or IDLE <=> no client */
4258 +			break;
4259 +
4260 +		case RESERVATION_ACTIVE:
4261 +			if (list_empty(&res->clients)) {
4262 +				res->env->change_state(res->env, res,
4263 +						RESERVATION_ACTIVE_IDLE);
4264 +			} /* else: nothing to do, more clients ready */
4265 +			break;
4266 +
4267 +		case RESERVATION_DEPLETED:
4268 +			/* do nothing */
4269 +			break;
4270 +	}
4271 +}
4272 +
4273 +static lt_t td_time_remaining_until_end(struct table_driven_reservation *tdres)
4274 +{
4275 +	lt_t now = tdres->res.env->current_time;
4276 +	lt_t end = tdres->cur_interval.end;
4277 +	//TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n", tdres->res.id,	tdres->cur_interval.start, now, end, tdres->res.state);
4278 +	if (now >=  end)
4279 +		return 0;
4280 +	else
4281 +		return end - now;
4282 +}
4283 +
4284 +static void td_replenish(
4285 +	struct reservation *res)
4286 +{
4287 +	struct table_driven_reservation *tdres =
4288 +		container_of(res, struct table_driven_reservation, res);
4289 +
4290 +	//TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id, res->next_replenishment);
4291 +
4292 +	/* figure out current interval */
4293 +	tdres->cur_interval.start = tdres->major_cycle_start +
4294 +		tdres->intervals[tdres->next_interval].start;
4295 +	tdres->cur_interval.end =  tdres->major_cycle_start +
4296 +		tdres->intervals[tdres->next_interval].end;
4297 +/*	TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
4298 +		tdres->major_cycle_start,
4299 +		tdres->cur_interval.start,
4300 +		tdres->cur_interval.end);
4301 +*/
4302 +	/* reset budget */
4303 +	res->cur_budget = td_time_remaining_until_end(tdres);
4304 +	res->budget_consumed = 0;
4305 +	//TRACE("td_replenish(%u): %s budget=%llu\n", res->id, res->cur_budget ? "" : "WARNING", res->cur_budget);
4306 +
4307 +	/* prepare next slot */
4308 +	tdres->next_interval = (tdres->next_interval + 1) % tdres->num_intervals;
4309 +	if (!tdres->next_interval)
4310 +		/* wrap to next major cycle */
4311 +		tdres->major_cycle_start += tdres->major_cycle;
4312 +
4313 +	/* determine next time this reservation becomes eligible to execute */
4314 +	res->next_replenishment  = tdres->major_cycle_start;
4315 +	res->next_replenishment += tdres->intervals[tdres->next_interval].start;
4316 +	//TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
4317 +
4318 +
4319 +	switch (res->state) {
4320 +		case RESERVATION_DEPLETED:
4321 +		case RESERVATION_ACTIVE:
4322 +		case RESERVATION_ACTIVE_IDLE:
4323 +			if (list_empty(&res->clients))
4324 +				res->env->change_state(res->env, res,
4325 +					RESERVATION_ACTIVE_IDLE);
4326 +			else
4327 +				/* we have clients & budget => ACTIVE */
4328 +				res->env->change_state(res->env, res,
4329 +					RESERVATION_ACTIVE);
4330 +			break;
4331 +
4332 +		case RESERVATION_INACTIVE:
4333 +			BUG();
4334 +			break;
4335 +	}
4336 +}
4337 +
4338 +static void td_drain_budget(
4339 +		struct reservation *res,
4340 +		lt_t how_much)
4341 +{
4342 +	struct table_driven_reservation *tdres =
4343 +		container_of(res, struct table_driven_reservation, res);
4344 +
4345 +	res->budget_consumed += how_much;
4346 +	res->budget_consumed_total += how_much;
4347 +
4348 +	/* Table-driven scheduling: instead of tracking the budget, we compute
4349 +	 * how much time is left in this allocation interval. */
4350 +
4351 +	/* sanity check: we should never try to drain from future slots */
4352 +	//TRACE("TD_DRAIN STATE(%d) [%llu,%llu]  %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
4353 +	//BUG_ON(tdres->cur_interval.start > res->env->current_time);
4354 +	if (tdres->cur_interval.start > res->env->current_time)
4355 +		TRACE("TD_DRAIN BUG!!!!!!!!!!\n");
4356 +
4357 +	switch (res->state) {
4358 +		case RESERVATION_DEPLETED:
4359 +		case RESERVATION_INACTIVE:
4360 +			//BUG();
4361 +			TRACE("TD_DRAIN!!!!!!!!! RES_STATE = %d\n", res->state);
4362 +			break;
4363 +
4364 +		case RESERVATION_ACTIVE_IDLE:
4365 +		case RESERVATION_ACTIVE:
4366 +			res->cur_budget = td_time_remaining_until_end(tdres);
4367 +			//TRACE("td_drain_budget(%u): drained to budget=%llu\n", res->id, res->cur_budget);
4368 +			if (!res->cur_budget) {
4369 +				res->env->change_state(res->env, res,
4370 +					RESERVATION_DEPLETED);
4371 +			} else {
4372 +				/* sanity check budget calculation */
4373 +				//BUG_ON(res->env->current_time >= tdres->cur_interval.end);
4374 +				//BUG_ON(res->env->current_time < tdres->cur_interval.start);
4375 +				if (res->env->current_time >= tdres->cur_interval.end)
4376 +					printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING1\n");
4377 +				if (res->env->current_time < tdres->cur_interval.start)
4378 +					printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING2\n");
4379 +			}
4380 +
4381 +			break;
4382 +	}
4383 +}
4384 +
4385 +static struct task_struct* td_dispatch_client(
4386 +	struct reservation *res,
4387 +	lt_t *for_at_most)
4388 +{
4389 +	struct task_struct *t;
4390 +	struct table_driven_reservation *tdres =
4391 +		container_of(res, struct table_driven_reservation, res);
4392 +
4393 +	/* usual logic for selecting a client */
4394 +	t = default_dispatch_client(res, for_at_most);
4395 +
4396 +	TRACE_TASK(t, "td_dispatch_client(%u): selected, budget=%llu\n",
4397 +		res->id, res->cur_budget);
4398 +
4399 +	/* check how much budget we have left in this time slot */
4400 +	res->cur_budget = td_time_remaining_until_end(tdres);
4401 +
4402 +	TRACE_TASK(t, "td_dispatch_client(%u): updated to budget=%llu next=%d\n",
4403 +		res->id, res->cur_budget, tdres->next_interval);
4404 +
4405 +	if (unlikely(!res->cur_budget)) {
4406 +		/* Unlikely case: if we ran out of budget, the user configured
4407 +		 * a broken scheduling table (overlapping table slots).
4408 +		 * Not much we can do about this, but we can't dispatch a job
4409 +		 * now without causing overload. So let's register this reservation
4410 +		 * as depleted and wait for the next allocation. */
4411 +		TRACE("td_dispatch_client(%u): budget unexpectedly depleted "
4412 +			"(check scheduling table for unintended overlap)\n",
4413 +			res->id);
4414 +		res->env->change_state(res->env, res,
4415 +			RESERVATION_DEPLETED);
4416 +		return NULL;
4417 +	} else
4418 +		return t;
4419 +}
4420 +
4421 +static struct reservation_ops td_ops = {
4422 +	.dispatch_client = td_dispatch_client,
4423 +	.client_arrives = td_client_arrives,
4424 +	.client_departs = td_client_departs,
4425 +	.replenish = td_replenish,
4426 +	.drain_budget = td_drain_budget,
4427 +};
4428 +
4429 +void table_driven_reservation_init(
4430 +	struct table_driven_reservation *tdres,
4431 +	lt_t major_cycle,
4432 +	struct lt_interval *intervals,
4433 +	unsigned int num_intervals)
4434 +{
4435 +	unsigned int i;
4436 +
4437 +	/* sanity checking */
4438 +	BUG_ON(!num_intervals);
4439 +	for (i = 0; i < num_intervals; i++)
4440 +		BUG_ON(intervals[i].end <= intervals[i].start);
4441 +	for (i = 0; i + 1 < num_intervals; i++)
4442 +		BUG_ON(intervals[i + 1].start <= intervals[i].end);
4443 +	BUG_ON(intervals[num_intervals - 1].end > major_cycle);
4444 +
4445 +	reservation_init(&tdres->res);
4446 +	tdres->major_cycle = major_cycle;
4447 +	tdres->intervals = intervals;
4448 +	tdres->cur_interval.start = 0;
4449 +	tdres->cur_interval.end   = 0;
4450 +	tdres->num_intervals = num_intervals;
4451 +	tdres->res.ops = &td_ops;
4452 +}
4453 diff --git a/litmus/replicate_lib.c b/litmus/replicate_lib.c
4454 new file mode 100644
4455 index 0000000..cfc5258
4456 --- /dev/null
4457 +++ b/litmus/replicate_lib.c
4458 @@ -0,0 +1,50 @@
4459 +#include <asm/uaccess.h>
4460 +#include <linux/uaccess.h>
4461 +#include <linux/init.h>
4462 +#include <linux/types.h>
4463 +#include <linux/kernel.h>
4464 +#include <linux/module.h>
4465 +#include <linux/sysctl.h>
4466 +#include <linux/slab.h>
4467 +#include <linux/io.h>
4468 +#include <linux/mutex.h>
4469 +#include <linux/time.h>
4470 +#include <linux/migrate.h>
4471 +#include <linux/mm.h>
4472 +#include <linux/memcontrol.h>
4473 +#include <linux/mm_inline.h>
4474 +
4475 +#include <litmus/litmus_proc.h>
4476 +#include <litmus/sched_trace.h>
4477 +#include <litmus/cache_proc.h>
4478 +#include <litmus/mc2_common.h>
4479 +#include <litmus/replicate_lib.h>
4480 +
4481 +DEFINE_PER_CPU(struct list_head, shared_lib_page_list);
4482 +
4483 +#define shared_lib_pages_for(cpu_id)	(&per_cpu(shared_lib_page_list, cpu_id))
4484 +#define local_shared_lib_pages()	(this_cpu_ptr(&shared_lib_page_list))
4485 +
4486 +#define INVALID_PFN				(0xffffffff)
4487 +
4488 +static int __init litmus_replicate_lib_init(void)
4489 +{
4490 +	int cpu, ret = 0;
4491 +
4492 +	printk(KERN_INFO "Registering LITMUS^RT Per-core Shared Library module.\n");
4493 +	
4494 +	for_each_online_cpu(cpu) {
4495 +		INIT_LIST_HEAD(shared_lib_pages_for(cpu));
4496 +		printk(KERN_INFO "CPU%d PSL-list initialized.\n", cpu);
4497 +	}
4498 +	
4499 +	return ret;
4500 +}
4501 +
4502 +static void litmus_replicate_lib_exit(void)
4503 +{
4504 +	return;
4505 +}
4506 +
4507 +module_init(litmus_replicate_lib_init);
4508 +module_exit(litmus_replicate_lib_exit);
4509 \ No newline at end of file
4510 diff --git a/litmus/reservation.c b/litmus/reservation.c
4511 new file mode 100644
4512 index 0000000..5eee01a
4513 --- /dev/null
4514 +++ b/litmus/reservation.c
4515 @@ -0,0 +1,706 @@
4516 +#include <linux/sched.h>
4517 +#include <linux/slab.h>
4518 +
4519 +#include <litmus/litmus.h>
4520 +#include <litmus/reservation.h>
4521 +
4522 +#define BUDGET_ENFORCEMENT_AT_C 0
4523 +	
4524 +void reservation_init(struct reservation *res)
4525 +{
4526 +	memset(res, 0, sizeof(*res));
4527 +	res->state = RESERVATION_INACTIVE;
4528 +	INIT_LIST_HEAD(&res->clients);
4529 +}
4530 +
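     +/* default_dispatch_client - set *for_at_most to 0 and return the task of the
     + * first queued client that yields one, or NULL if no client currently does. */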
4531 +struct task_struct* default_dispatch_client(
4532 +	struct reservation *res,
4533 +	lt_t *for_at_most)
4534 +{
4535 +	struct reservation_client *client, *next;
4536 +	struct task_struct* tsk;
4537 +
4538 +	BUG_ON(res->state != RESERVATION_ACTIVE);
4539 +	*for_at_most = 0;
4540 +
4541 +	list_for_each_entry_safe(client, next, &res->clients, list) {
4542 +		tsk = client->dispatch(client);
4543 +		if (likely(tsk)) {
4544 +			return tsk;
4545 +		}
4546 +	}
4547 +	return NULL;
4548 +}
4549 +
4550 +static struct task_struct * task_client_dispatch(struct reservation_client *client)
4551 +{
4552 +	struct task_client *tc = container_of(client, struct task_client, client);
4553 +	return tc->task;
4554 +}
4555 +
4556 +void task_client_init(struct task_client *tc, struct task_struct *tsk,
4557 +	struct reservation *res)
4558 +{
4559 +	memset(&tc->client, 0, sizeof(tc->client));
4560 +	tc->client.dispatch = task_client_dispatch;
4561 +	tc->client.reservation = res;
4562 +	tc->task = tsk;
4563 +}
4564 +
4565 +static void sup_scheduler_update_at(
4566 +	struct sup_reservation_environment* sup_env,
4567 +	lt_t when)
4568 +{
4569 +	//TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
4570 +	if (sup_env->next_scheduler_update > when)
4571 +		sup_env->next_scheduler_update = when;
4572 +}
4573 +
4574 +void sup_scheduler_update_after(
4575 +	struct sup_reservation_environment* sup_env,
4576 +	lt_t timeout)
4577 +{
4578 +	sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
4579 +}
4580 +
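     +/* Insert a depleted reservation into the replenishment queue, which is ordered
     + * by next_replenishment; returns nonzero if it was queued behind at least one
     + * existing entry (i.e., it is not the new head of the queue). */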
4581 +static int _sup_queue_depleted(
4582 +	struct sup_reservation_environment* sup_env,
4583 +	struct reservation *res)
4584 +{
4585 +	struct list_head *pos;
4586 +	struct reservation *queued;
4587 +	int passed_earlier = 0;
4588 +
4589 +	list_for_each(pos, &sup_env->depleted_reservations) {
4590 +		queued = list_entry(pos, struct reservation, list);
4591 +		if (queued->next_replenishment > res->next_replenishment) {
4592 +			list_add(&res->list, pos->prev);
4593 +			return passed_earlier;
4594 +		} else
4595 +			passed_earlier = 1;
4596 +	}
4597 +
4598 +	list_add_tail(&res->list, &sup_env->depleted_reservations);
4599 +
4600 +	return passed_earlier;
4601 +}
4602 +
4603 +static void sup_queue_depleted(
4604 +	struct sup_reservation_environment* sup_env,
4605 +	struct reservation *res)
4606 +{
4607 +	int passed_earlier = _sup_queue_depleted(sup_env, res);
4608 +
4609 +	/* check for updated replenishment time */
4610 +	if (!passed_earlier)
4611 +		sup_scheduler_update_at(sup_env, res->next_replenishment);
4612 +}
4613 +
4614 +static int _sup_queue_active(
4615 +	struct sup_reservation_environment* sup_env,
4616 +	struct reservation *res)
4617 +{
4618 +	struct list_head *pos;
4619 +	struct reservation *queued;
4620 +	int passed_active = 0;
4621 +
4622 +	list_for_each(pos, &sup_env->active_reservations) {
4623 +		queued = list_entry(pos, struct reservation, list);
4624 +		if (queued->priority > res->priority) {
4625 +			list_add(&res->list, pos->prev);
4626 +			return passed_active;
4627 +		} else if (queued->state == RESERVATION_ACTIVE)
4628 +			passed_active = 1;
4629 +	}
4630 +
4631 +	list_add_tail(&res->list, &sup_env->active_reservations);
4632 +	return passed_active;
4633 +}
4634 +
4635 +static void sup_queue_active(
4636 +	struct sup_reservation_environment* sup_env,
4637 +	struct reservation *res)
4638 +{
4639 +	int passed_active = _sup_queue_active(sup_env, res);
4640 +
4641 +	/* check for possible preemption */
4642 +	if (res->state == RESERVATION_ACTIVE && !passed_active)
4643 +		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
4644 +	else {
4645 +		/* Active means this reservation is draining budget => make sure
4646 +		 * the scheduler is called to notice when the reservation budget has been
4647 +		 * drained completely. */
4648 +		sup_scheduler_update_after(sup_env, res->cur_budget);
4649 +	}
4650 +}
4651 +
4652 +static void sup_queue_reservation(
4653 +	struct sup_reservation_environment* sup_env,
4654 +	struct reservation *res)
4655 +{
4656 +	switch (res->state) {
4657 +		case RESERVATION_INACTIVE:
4658 +			list_add(&res->list, &sup_env->inactive_reservations);
4659 +			break;
4660 +
4661 +		case RESERVATION_DEPLETED:
4662 +			sup_queue_depleted(sup_env, res);
4663 +			break;
4664 +
4665 +		case RESERVATION_ACTIVE_IDLE:
4666 +		case RESERVATION_ACTIVE:
4667 +			sup_queue_active(sup_env, res);
4668 +			break;
4669 +	}
4670 +}
4671 +
4672 +void sup_add_new_reservation(
4673 +	struct sup_reservation_environment* sup_env,
4674 +	struct reservation* new_res)
4675 +{
4676 +	new_res->env = &sup_env->env;
4677 +	sup_queue_reservation(sup_env, new_res);
4678 +}
4679 +
4680 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
4681 +	unsigned int id)
4682 +{
4683 +	struct reservation *res;
4684 +
4685 +	list_for_each_entry(res, &sup_env->active_reservations, list) {
4686 +		if (res->id == id)
4687 +			return res;
4688 +	}
4689 +	list_for_each_entry(res, &sup_env->inactive_reservations, list) {
4690 +		if (res->id == id)
4691 +			return res;
4692 +	}
4693 +	list_for_each_entry(res, &sup_env->depleted_reservations, list) {
4694 +		if (res->id == id)
4695 +			return res;
4696 +	}
4697 +
4698 +	return NULL;
4699 +}
4700 +
4701 +static void sup_charge_budget(
4702 +	struct sup_reservation_environment* sup_env,
4703 +	lt_t delta)
4704 +{
4705 +	struct list_head *pos, *next;
4706 +	struct reservation *res;
4707 +
4708 +	int encountered_active = 0;
4709 +
4710 +	list_for_each_safe(pos, next, &sup_env->active_reservations) {
4711 +		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
4712 +		res = list_entry(pos, struct reservation, list);
4713 +		if (res->state == RESERVATION_ACTIVE) {
4714 +			TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
4715 +			if (encountered_active == 0 && res->blocked_by_ghost == 0) {
4716 +				TRACE("DRAIN !!\n");
4717 +				res->ops->drain_budget(res, delta);
4718 +				encountered_active = 1;
4719 +			}			
4720 +		} else {
4721 +			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
4722 +			TRACE("sup_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
4723 +			res->ops->drain_budget(res, delta);
4724 +		}
4725 +		if (res->state == RESERVATION_ACTIVE ||
4726 +			res->state == RESERVATION_ACTIVE_IDLE)
4727 +		{
4728 +			/* make sure scheduler is invoked when this reservation expires
4729 +			 * its remaining budget */
4730 +			 TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n",
4731 +				res->id, res->cur_budget);
4732 +			 sup_scheduler_update_after(sup_env, res->cur_budget);
4733 +		}
4734 +		//if (encountered_active == 2)
4735 +			/* stop at the first ACTIVE reservation */
4736 +		//	break;
4737 +	}
4738 +	//TRACE("finished charging budgets\n");
4739 +}
4740 +
4741 +static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
4742 +{
4743 +	struct list_head *pos, *next;
4744 +	struct reservation *res;
4745 +
4746 +	list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
4747 +		res = list_entry(pos, struct reservation, list);
4748 +		if (res->next_replenishment <= sup_env->env.current_time) {
4749 +			res->ops->replenish(res);
4750 +		} else {
4751 +			/* list is ordered by increasing depletion times */
4752 +			break;
4753 +		}
4754 +	}
4755 +	//TRACE("finished replenishing budgets\n");
4756 +
4757 +	/* request a scheduler update at the next replenishment instant */
4758 +	res = list_first_entry_or_null(&sup_env->depleted_reservations,
4759 +		struct reservation, list);
4760 +	if (res)
4761 +		sup_scheduler_update_at(sup_env, res->next_replenishment);
4762 +}
4763 +
4764 +void sup_update_time(
4765 +	struct sup_reservation_environment* sup_env,
4766 +	lt_t now)
4767 +{
4768 +	lt_t delta;
4769 +
4770 +	/* If the time didn't advance, there is nothing to do.
4771 +	 * This check makes it safe to call sup_update_time() potentially
4772 +	 * multiple times (e.g., via different code paths). */
4773 +	//TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
4774 +	if (unlikely(now <= sup_env->env.current_time))
4775 +		return;
4776 +
4777 +	delta = now - sup_env->env.current_time;
4778 +	sup_env->env.current_time = now;
4779 +
4780 +	/* check if future updates are required */
4781 +	if (sup_env->next_scheduler_update <= sup_env->env.current_time)
4782 +		sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
4783 +
4784 +	/* deplete budgets by passage of time */
4785 +	//TRACE("CHARGE###\n");
4786 +	sup_charge_budget(sup_env, delta);
4787 +
4788 +	/* check if any budgets were replenished */
4789 +	//TRACE("REPLENISH###\n");
4790 +	sup_replenish_budgets(sup_env);
4791 +}
4792 +
4793 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
4794 +{
4795 +	struct reservation *res, *next;
4796 +	struct task_struct *tsk = NULL;
4797 +	lt_t time_slice;
4798 +
4799 +	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
4800 +		if (res->state == RESERVATION_ACTIVE) {
4801 +			tsk = res->ops->dispatch_client(res, &time_slice);
4802 +			if (likely(tsk)) {
4803 +				if (time_slice)
4804 +				    sup_scheduler_update_after(sup_env, time_slice);
4805 +				sup_scheduler_update_after(sup_env, res->cur_budget);
4806 +				return tsk;
4807 +			}
4808 +		}
4809 +	}
4810 +
4811 +	return NULL;
4812 +}
4813 +
4814 +static void sup_res_change_state(
4815 +	struct reservation_environment* env,
4816 +	struct reservation *res,
4817 +	reservation_state_t new_state)
4818 +{
4819 +	struct sup_reservation_environment* sup_env;
4820 +
4821 +	sup_env = container_of(env, struct sup_reservation_environment, env);
4822 +
4823 +	TRACE("reservation R%d state %d->%d at %llu\n",
4824 +		res->id, res->state, new_state, env->current_time);
4825 +
4826 +	list_del(&res->list);
4827 +	/* check if we need to reschedule because we lost an active reservation */
4828 +	if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
4829 +		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
4830 +	res->state = new_state;
4831 +	sup_queue_reservation(sup_env, res);
4832 +}
4833 +
4834 +void sup_init(struct sup_reservation_environment* sup_env)
4835 +{
4836 +	memset(sup_env, 0, sizeof(*sup_env));
4837 +
4838 +	INIT_LIST_HEAD(&sup_env->active_reservations);
4839 +	INIT_LIST_HEAD(&sup_env->depleted_reservations);
4840 +	INIT_LIST_HEAD(&sup_env->inactive_reservations);
4841 +
4842 +	sup_env->env.change_state = sup_res_change_state;
4843 +
4844 +	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
4845 +}
4846 +
4847 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
4848 +	unsigned int id)
4849 +{
4850 +	struct reservation *res;
4851 +
4852 +	list_for_each_entry(res, &gmp_env->active_reservations, list) {
4853 +		if (res->id == id)
4854 +			return res;
4855 +	}
4856 +	list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
4857 +		if (res->id == id)
4858 +			return res;
4859 +	}
4860 +	list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
4861 +		if (res->id == id)
4862 +			return res;
4863 +	}
4864 +
4865 +	return NULL;
4866 +}
4867 +
4868 +
4869 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
4870 +	unsigned int id)
4871 +{
4872 +	struct next_timer_event *event;
4873 +
4874 +	list_for_each_entry(event, &gmp_env->next_events, list) {
4875 +		if (event->id == id)
4876 +			return event;
4877 +	}
4878 +
4879 +	return NULL;
4880 +}
4881 +
4882 +
4883 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
4884 +	lt_t when)
4885 +{
4886 +	struct next_timer_event *event;
4887 +
4888 +	list_for_each_entry(event, &gmp_env->next_events, list) {
4889 +		if (event->next_update == when)
4890 +			return event;
4891 +	}
4892 +
4893 +	return NULL;
4894 +}
4895 +
4896 +#define TIMER_RESOLUTION 100000L
4897 +
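     +/* gmp_add_event - record a future timer event (e.g., replenishment or budget
     + * drain) for reservation @id, keeping next_events sorted by time; an existing
     + * event for the same id is moved earlier if @when precedes its current time. */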
4898 +static void gmp_add_event(
4899 +	struct gmp_reservation_environment* gmp_env,
4900 +	lt_t when, unsigned int id, event_type_t type)
4901 +{
4902 +	struct next_timer_event *nevent, *queued;
4903 +	struct list_head *pos;
4904 +	int found = 0, update = 0;
4905 +
4906 +	//when = div64_u64(when, TIMER_RESOLUTION);
4907 +	//when *= TIMER_RESOLUTION;
4908 +//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
4909 +	nevent = gmp_find_event_by_id(gmp_env, id);
4910 +	
4911 +	if (nevent)
4912 +		TRACE("EVENT R%d update prev = %llu, new = %llu\n", nevent->id, nevent->next_update, when);
4913 +	
4914 +	if (nevent && nevent->next_update > when) {
4915 +		list_del(&nevent->list);
4916 +		update = 1;
4917 +		
4918 +	}
4919 +	
4920 +	if (!nevent || nevent->type != type || update == 1) {
4921 +		if (update == 0)
4922 +			nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
4923 +		BUG_ON(!nevent);
4924 +		nevent->next_update = when;
4925 +		nevent->id = id;
4926 +		nevent->type = type;
4927 +		nevent->timer_armed_on = NO_CPU;
4928 +
4929 +		list_for_each(pos, &gmp_env->next_events) {
4930 +			queued = list_entry(pos, struct next_timer_event, list);
4931 +			if (queued->next_update > nevent->next_update) {
4932 +				list_add(&nevent->list, pos->prev);
4933 +				found = 1;
4934 +				TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
4935 +				break;
4936 +			}
4937 +		}
4938 +		
4939 +		if (!found) {
4940 +			list_add_tail(&nevent->list, &gmp_env->next_events);
4941 +			TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
4942 +		}
4943 +	} else {
4944 +		//TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
4945 +; //printk(KERN_ALERT "EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
4946 +	}
4947 +	
4948 +	TRACE("======START PRINTING EVENT LIST======\n");
4949 +	gmp_print_events(gmp_env, litmus_clock());
4950 +	TRACE("======FINISH PRINTING EVENT LIST======\n");
4951 +}
4952 +
4953 +void gmp_add_event_after(
4954 +	struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
4955 +{
4956 +	//printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
4957 +	gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
4958 +}
4959 +
4960 +static void gmp_queue_depleted(
4961 +	struct gmp_reservation_environment* gmp_env,
4962 +	struct reservation *res)
4963 +{
4964 +	struct list_head *pos;
4965 +	struct reservation *queued;
4966 +	int found = 0;
4967 +
4968 +//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
4969 +	
4970 +	list_for_each(pos, &gmp_env->depleted_reservations) {
4971 +		queued = list_entry(pos, struct reservation, list);
4972 +		if (queued && (queued->next_replenishment > res->next_replenishment)) {
4973 +//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
4974 +			list_add(&res->list, pos->prev);
4975 +			found = 1;
4976 +			break;
4977 +		}
4978 +	}
4979 +
4980 +	if (!found)
4981 +		list_add_tail(&res->list, &gmp_env->depleted_reservations);
4982 +
4983 +	TRACE("R%d queued to depleted_list\n", res->id);
4984 +//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
4985 +	gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
4986 +}
4987 +
4988 +static void gmp_queue_active(
4989 +	struct gmp_reservation_environment* gmp_env,
4990 +	struct reservation *res)
4991 +{
4992 +	struct list_head *pos;
4993 +	struct reservation *queued;
4994 +	int check_preempt = 1, found = 0;
4995 +
4996 +	list_for_each(pos, &gmp_env->active_reservations) {
4997 +		queued = list_entry(pos, struct reservation, list);
4998 +		if (queued->priority > res->priority) {
4999 +			list_add(&res->list, pos->prev);
5000 +			found = 1;
5001 +			break;
5002 +		} else if (queued->scheduled_on == NO_CPU)
5003 +			check_preempt = 0;
5004 +	}
5005 +
5006 +	if (!found)
5007 +		list_add_tail(&res->list, &gmp_env->active_reservations);
5008 +
5009 +	/* check for possible preemption */
5010 +	if (res->state == RESERVATION_ACTIVE && check_preempt)
5011 +		gmp_env->schedule_now++;
5012 +
5013 +#if BUDGET_ENFORCEMENT_AT_C	
5014 +	gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
5015 +#endif
5016 +	res->event_added = 1;	
5017 +}
5018 +
5019 +static void gmp_queue_reservation(
5020 +	struct gmp_reservation_environment* gmp_env,
5021 +	struct reservation *res)
5022 +{
5023 +
5024 +//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
5025 +	switch (res->state) {
5026 +		case RESERVATION_INACTIVE:
5027 +			list_add(&res->list, &gmp_env->inactive_reservations);
5028 +			break;
5029 +
5030 +		case RESERVATION_DEPLETED:
5031 +			gmp_queue_depleted(gmp_env, res);
5032 +			break;
5033 +
5034 +		case RESERVATION_ACTIVE_IDLE:
5035 +		case RESERVATION_ACTIVE:
5036 +			gmp_queue_active(gmp_env, res);
5037 +			break;
5038 +	}
5039 +}
5040 +
5041 +void gmp_add_new_reservation(
5042 +	struct gmp_reservation_environment* gmp_env,
5043 +	struct reservation* new_res)
5044 +{
5045 +	new_res->env = &gmp_env->env;
5046 +	gmp_queue_reservation(gmp_env, new_res);
5047 +}
5048 +
5049 +#if BUDGET_ENFORCEMENT_AT_C
5050 +static void gmp_charge_budget(
5051 +	struct gmp_reservation_environment* gmp_env,
5052 +	lt_t delta)
5053 +{
5054 +	struct list_head *pos, *next;
5055 +	struct reservation *res;
5056 +
5057 +	list_for_each_safe(pos, next, &gmp_env->active_reservations) {
5058 +		int drained = 0;
5059 +		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
5060 +		res = list_entry(pos, struct reservation, list);
5061 +		if (res->state == RESERVATION_ACTIVE) {
5062 +			TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
5063 +			if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
5064 +				TRACE("DRAIN !!\n");
5065 +				drained = 1;
5066 +				res->ops->drain_budget(res, delta);
5067 +			} else {
5068 +				TRACE("NO DRAIN (not scheduled)!!\n");
5069 +			}
5070 +		} else {
5071 +			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
5072 +			if (res->state != RESERVATION_ACTIVE_IDLE)
5073 +				TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
5074 +			TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
5075 +			//if (res->is_ghost != NO_CPU) {
5076 +				TRACE("DRAIN !!\n");
5077 +				drained = 1;
5078 +				res->ops->drain_budget(res, delta);
5079 +			//}
5080 +		}
5081 +		if ((res->state == RESERVATION_ACTIVE ||
5082 +			res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
5083 +		{
5084 +			/* make sure scheduler is invoked when this reservation expires
5085 +			 * its remaining budget */
5086 +			 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
5087 +			 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
5088 +			 res->event_added = 1;
5089 +		}
5090 +		//if (encountered_active == 2)
5091 +			/* stop at the first ACTIVE reservation */
5092 +		//	break;
5093 +	}
5094 +	//TRACE("finished charging budgets\n");
5095 +}
5096 +#else
5097 +
5098 +static void gmp_charge_budget(
5099 +	struct gmp_reservation_environment* gmp_env,
5100 +	lt_t delta)
5101 +{
5102 +	return;
5103 +}
5104 +
5105 +#endif
5106 +
5107 +static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
5108 +{
5109 +	struct list_head *pos, *next;
5110 +	struct reservation *res;
5111 +
5112 +	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
5113 +		res = list_entry(pos, struct reservation, list);
5114 +		if (res->next_replenishment <= gmp_env->env.current_time) {
5115 +			res->ops->replenish(res);
5116 +			if (res->is_ghost != NO_CPU) {
5117 +				TRACE("R%d replenished! scheduled_on=%d\n", res->id, res->scheduled_on);
5118 +			}
5119 +		} else {
5120 +			/* list is ordered by increasing depletion times */
5121 +			break;
5122 +		}
5123 +	}
5124 +	//TRACE("finished replenishing budgets\n");
5125 +}
5126 +
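     +/* Treat clock advances of at most EPSILON (presumably nanoseconds) as no
     + * advance at all. */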
5127 +#define EPSILON	50
5128 +
5129 +/* return schedule_now */
5130 +int gmp_update_time(
5131 +	struct gmp_reservation_environment* gmp_env,
5132 +	lt_t now)
5133 +{
5134 +	struct next_timer_event *event, *next;
5135 +	lt_t delta, ret;
5136 +
5137 +	/* If the time didn't advance, there is nothing to do.
5138 +	 * This check makes it safe to call gmp_update_time() potentially
5139 +	 * multiple times (e.g., via different code paths). */
5140 +	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
5141 +	if (unlikely(now <= gmp_env->env.current_time + EPSILON))
5142 +		return 0;
5143 +
5144 +	delta = now - gmp_env->env.current_time;
5145 +	gmp_env->env.current_time = now;
5146 +
5147 +
5148 +	//gmp_print_events(gmp_env, now);
5149 +	/* deplete budgets by passage of time */
5150 +	//TRACE("CHARGE###\n");
5151 +	gmp_charge_budget(gmp_env, delta);
5152 +
5153 +	/* check if any budgets were replenished */
5154 +	//TRACE("REPLENISH###\n");
5155 +	gmp_replenish_budgets(gmp_env);
5156 +
5157 +	
5158 +	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
5159 +		if (event->next_update < now) {
5160 +			list_del(&event->list);
5161 +			//TRACE("EVENT at %llu IS DELETED\n", event->next_update);
5162 +			kfree(event);
5163 +		} else {
5164 +			break;
5165 +		}
5166 +	}		
5167 +	
5168 +	//gmp_print_events(gmp_env, litmus_clock());
5169 +	
5170 +	ret = min(gmp_env->schedule_now, NR_CPUS);
5171 +	gmp_env->schedule_now = 0;
5172 +
5173 +	return ret;
5174 +}
5175 +
5176 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
5177 +{
5178 +	struct next_timer_event *event, *next;
5179 +
5180 +	TRACE("GLOBAL EVENTS now=%llu\n", now);
5181 +	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
5182 +		TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
5183 +	}		
5184 +}
5185 +
5186 +static void gmp_res_change_state(
5187 +	struct reservation_environment* env,
5188 +	struct reservation *res,
5189 +	reservation_state_t new_state)
5190 +{
5191 +	struct gmp_reservation_environment* gmp_env;
5192 +
5193 +	gmp_env = container_of(env, struct gmp_reservation_environment, env);
5194 +
5195 +	TRACE("GMP reservation R%d state %d->%d at %llu\n",
5196 +		res->id, res->state, new_state, env->current_time);
5197 +
5198 +	list_del(&res->list);
5199 +	/* check if we need to reschedule because we lost an active reservation */
5200 +	if (res->state == RESERVATION_ACTIVE)
5201 +		gmp_env->schedule_now++;
5202 +	res->state = new_state;
5203 +	gmp_queue_reservation(gmp_env, res);
5204 +}
5205 +
5206 +void gmp_init(struct gmp_reservation_environment* gmp_env)
5207 +{
5208 +	memset(gmp_env, 0, sizeof(*gmp_env));
5209 +
5210 +	INIT_LIST_HEAD(&gmp_env->active_reservations);
5211 +	INIT_LIST_HEAD(&gmp_env->depleted_reservations);
5212 +	INIT_LIST_HEAD(&gmp_env->inactive_reservations);
5213 +	INIT_LIST_HEAD(&gmp_env->next_events);
5214 +
5215 +	gmp_env->env.change_state = gmp_res_change_state;
5216 +
5217 +	gmp_env->schedule_now = 0;
5218 +	gmp_env->will_schedule = false;
5219 +	
5220 +	raw_spin_lock_init(&gmp_env->lock);
5221 +}
5222 diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
5223 new file mode 100644
5224 index 0000000..6c02a56
5225 --- /dev/null
5226 +++ b/litmus/sched_mc2.c
5227 @@ -0,0 +1,1577 @@
5228 +/*
5229 + * litmus/sched_mc2.c
5230 + *
5231 + * Implementation of the Mixed-Criticality on MultiCore scheduler
5232 + *
5233 + * This plugin implements the scheduling algorithm proposed in the paper
5234 + * "Mixed-Criticality Real-Time Scheduling for Multicore Systems".
5235 + */ 
5236 + 
5237 +#include <linux/percpu.h>
5238 +#include <linux/slab.h>
5239 +#include <asm/uaccess.h>
5240 +
5241 +#include <litmus/sched_plugin.h>
5242 +#include <litmus/preempt.h>
5243 +#include <litmus/debug_trace.h>
5244 +
5245 +#include <litmus/litmus.h>
5246 +#include <litmus/jobs.h>
5247 +#include <litmus/budget.h>
5248 +#include <litmus/litmus_proc.h>
5249 +#include <litmus/sched_trace.h>
5250 +#include <litmus/cache_proc.h>
5251 +#include <litmus/trace.h>
5252 +
5253 +#include <litmus/mc2_common.h>
5254 +#include <litmus/reservation.h>
5255 +#include <litmus/polling_reservations.h>
5256 +
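     +/* When 0, budget enforcement for level-C (global) reservations is disabled;
     + * see the matching definition in litmus/reservation.c. */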
5257 +#define BUDGET_ENFORCEMENT_AT_C 0
5258 +
5259 +extern void do_partition(enum crit_level lv, int cpu);
5260 +
5261 +/* _global_env - reservation container for level-C tasks */
5262 +struct gmp_reservation_environment _global_env;
5263 +
5264 +/* cpu_entry - keep track of a running task on a cpu
5265 + * This state is used to decide the lowest priority cpu
5266 + */
5267 +struct cpu_entry {
5268 +	struct task_struct *scheduled;
5269 +	lt_t deadline;
5270 +	int cpu;
5271 +	enum crit_level lv;
5272 +	/* if will_schedule is true, this cpu has already been selected and
5273 +	   will call mc2_schedule() soon. */
5274 +	bool will_schedule;
5275 +};
5276 +
5277 +/* cpu_priority - a global state for choosing the lowest priority CPU */
5278 +struct cpu_priority {
5279 +	raw_spinlock_t lock;
5280 +	struct cpu_entry cpu_entries[NR_CPUS];
5281 +};
5282 +
5283 +struct cpu_priority _lowest_prio_cpu;
5284 +	
5285 +/* mc2_task_state - a task state structure */
5286 +struct mc2_task_state {
5287 +	struct task_client res_info;
5288 +	/* if cpu == -1, this task is a global task (level C) */
5289 +	int cpu;
5290 +	bool has_departed;
5291 +	struct mc2_task mc2_param;
5292 +};
5293 +
5294 +/* crit_entry - maintain the logically running job (ghost job) */
5295 +struct crit_entry {
5296 +	enum crit_level level;
5297 +	struct task_struct *running;
5298 +};
5299 +
5300 +/* mc2_cpu_state - maintain the scheduled state and ghost jobs
5301 + * timer : per-CPU scheduling timer; serves partitioned (level A/B) events
5302 + *         and any global (level C) events armed on this CPU
5303 + */
5304 +struct mc2_cpu_state {
5305 +	raw_spinlock_t lock;
5306 +
5307 +	struct sup_reservation_environment sup_env;
5308 +	struct hrtimer timer;
5309 +
5310 +	int cpu;
5311 +	struct task_struct* scheduled;
5312 +	struct crit_entry crit_entries[NUM_CRIT_LEVELS];
5313 +};
5314 +
5315 +static int resched_cpu[NR_CPUS];
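+/* resched_cpu[i] flags CPU i for rescheduling; it is set under
+ * _global_env.lock and consumed in mc2_schedule() and mc2_finish_switch(). */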
5316 +static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
5317 +static int level_a_priorities[NR_CPUS];
5318 +
5319 +#define cpu_state_for(cpu_id)	(&per_cpu(mc2_cpu_state, cpu_id))
5320 +#define local_cpu_state()	(this_cpu_ptr(&mc2_cpu_state))
5321 +
5322 +/* get_mc2_state - get the task's state */
5323 +static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
5324 +{
5325 +	struct mc2_task_state* tinfo;
5326 +	
5327 +	tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state;
5328 +	
5329 +	if (tinfo)
5330 +		return tinfo;
5331 +	else
5332 +		return NULL;
5333 +}
5334 +
5335 +/* get_task_crit_level - return the criticality level of a task */
5336 +static enum crit_level get_task_crit_level(struct task_struct *tsk)
5337 +{
5338 +	struct mc2_task *mp;
5339 +	
5340 +	if (!tsk || !is_realtime(tsk))
5341 +		return NUM_CRIT_LEVELS;
5342 +	
5343 +	mp = tsk_rt(tsk)->mc2_data;
5344 +	
5345 +	if (!mp)
5346 +		return NUM_CRIT_LEVELS;
5347 +	else
5348 +		return mp->crit;
5349 +}
5350 +
5351 +/* task_departs - remove a task from its reservation
5352 + *                If the job completed, any remaining budget is discarded
5353 + *                (no ghost jobs are created).
5354 + *
5355 + * @job_complete	indicates whether the job has completed
5356 + */
5357 +static void task_departs(struct task_struct *tsk, int job_complete)
5358 +{
5359 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
5360 +
5361 +	struct reservation* res = NULL;
5362 +	struct reservation_client *client = NULL;
5363 +
5364 +	BUG_ON(!is_realtime(tsk));
5365 +	
5366 +	res    = tinfo->res_info.client.reservation;
5367 +	client = &tinfo->res_info.client;
5368 +	BUG_ON(!res);
5369 +	BUG_ON(!client);
5370 +
5371 +	/* No ghost jobs: discard any remaining budget on completion */
5372 +	if (job_complete) {
5373 +		res->cur_budget = 0;
5374 +		sched_trace_task_completion(tsk, 0);
5375 +	}
5376 +
5377 +	res->ops->client_departs(res, client, job_complete);
5378 +	tinfo->has_departed = true;
5379 +	TRACE_TASK(tsk, "Client departs with budget %llu at %llu\n", res->cur_budget, litmus_clock());
5380 +}
5381 +
5382 +/* task_arrives - put a task into its reservation
5383 + *               If the job was a ghost job, remove it from crit_entries[]
5384 + */
5385 +static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
5386 +{
5387 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
5388 +	struct reservation* res;
5389 +	struct reservation_client *client;
5390 +	enum crit_level lv = get_task_crit_level(tsk);
5391 +
5392 +	res    = tinfo->res_info.client.reservation;
5393 +	client = &tinfo->res_info.client;
5394 +
5395 +	tinfo->has_departed = false;
5396 +
5397 +	res->ops->client_arrives(res, client);
5398 +	TRACE_TASK(tsk, "Client arrives at %llu\n", litmus_clock());
5399 +	
5400 +	if (lv != NUM_CRIT_LEVELS) {
5401 +		struct crit_entry *ce;
5402 +		ce = &state->crit_entries[lv];
5403 +		/* if the current task is a ghost job, remove it */
5404 +		if (ce->running == tsk)
5405 +			ce->running = NULL;
5406 +	}
5407 +}
5408 +
5409 +/* get_lowest_prio_cpu - return the lowest priority cpu
5410 + *                       This will be used for scheduling level-C tasks.
5411 + *                       If all CPUs are running tasks with higher
5412 + *                       priority than level C, return NO_CPU.
5413 + */
5414 +static int get_lowest_prio_cpu(lt_t priority)
5415 +{
5416 +	struct cpu_entry *ce;
5417 +	int cpu, ret = NO_CPU;
5418 +	lt_t latest_deadline = 0;
5419 +	
5420 +	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
5421 +	if (!ce->will_schedule && !ce->scheduled) {
5422 +		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
5423 +		return ce->cpu;
5424 +	} else {
5425 +		TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
5426 +	}
5427 +
5428 +	for_each_online_cpu(cpu) {
5429 +		ce = &_lowest_prio_cpu.cpu_entries[cpu];
5430 +		/* If a CPU will call schedule() in the near future, we don't
5431 +		   return that CPU. */
5432 +		TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
5433 +	      ce->scheduled ? (ce->scheduled)->comm : "null",
5434 +	      ce->scheduled ? (ce->scheduled)->pid : 0,
5435 +	      ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
5436 +		if (!ce->will_schedule) {
5437 +			if (!ce->scheduled) {
5438 +				/* Idle cpu, return this. */
5439 +				TRACE("CPU %d is the lowest!\n", ce->cpu);
5440 +				return ce->cpu;
5441 +			} else if (ce->lv == CRIT_LEVEL_C && 
5442 +			           ce->deadline > latest_deadline) {
5443 +				latest_deadline = ce->deadline;
5444 +				ret = ce->cpu;
5445 +			}
5446 +		}
5447 +	}		
5448 +
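+	/* A priority of 0 is treated as highest; otherwise a CPU is only chosen
+	 * if its running level-C job has a later deadline ('priority' appears to
+	 * encode the absolute deadline of EDF reservations). */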
5449 +	if (priority >= latest_deadline)
5450 +		ret = NO_CPU;
5451 +	
5452 +	TRACE("CPU %d is the lowest!\n", ret);
5453 +
5454 +	return ret;
5455 +}
5456 +
5457 +/* NOTE: drops state->lock */
5458 +/* mc2_update_timer_and_unlock - update the scheduling timer and unlock.
5459 + *                               Whenever res_env.current_time is updated,
5460 + *                               we check next_scheduler_update and set
5461 + *                               a timer.
5462 + *                               If there exists a global event that is not
5463 + *                               yet armed on any CPU and is due before the
5464 + *                               local update, arm it on this CPU's timer.
5465 + */
5466 +static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
5467 +{
5468 +	int local, cpus;
5469 +	lt_t update, now;
5470 +	struct next_timer_event *event, *next;
5471 +	int reschedule[NR_CPUS];
5472 +	
5473 +	for (cpus = 0; cpus<NR_CPUS; cpus++)
5474 +		reschedule[cpus] = 0;
5475 +	
5476 +	update = state->sup_env.next_scheduler_update;
5477 +	now = state->sup_env.env.current_time;
5478 +
5479 +	/* Be sure we're actually running on the right core,
5480 +	 * as pres_update_timer() is also called from pres_task_resume(),
5481 +	 * which might be called on any CPU when a thread resumes.
5482 +	 */
5483 +	local = local_cpu_state() == state;
5484 +
5485 +	raw_spin_lock(&_global_env.lock);
5486 +		
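+	/* Scan pending global events: already-expired events trigger a
+	 * reschedule on the lowest-priority CPU; the earliest future event not
+	 * armed elsewhere is merged into this CPU's timer update below. */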
5487 +	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
5488 +		/* If the event time is already passed, we call schedule() on
5489 +		   the lowest priority cpu */
5490 +		if (event->next_update >= update) {
5491 +			break;
5492 +		}
5493 +		
5494 +		if (event->next_update < litmus_clock()) {
5495 +			if (event->timer_armed_on == NO_CPU) {
5496 +				struct reservation *res = gmp_find_by_id(&_global_env, event->id);
5497 +				int cpu = get_lowest_prio_cpu(res?res->priority:0);
5498 +				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
5499 +				list_del(&event->list);
5500 +				kfree(event);
5501 +				if (cpu != NO_CPU) {
5502 +					_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5503 +					if (cpu == local_cpu_state()->cpu)
5504 +						litmus_reschedule_local();
5505 +					else
5506 +						reschedule[cpu] = 1;
5507 +				}
5508 +			}
5509 +		} else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) {
5510 +			event->timer_armed_on = state->cpu;
5511 +			update = event->next_update;
5512 +			break;
5513 +		}
5514 +	}
5515 +	
5516 +	/* Must drop state lock before calling into hrtimer_start(), which
5517 +	 * may raise a softirq, which in turn may wake ksoftirqd. */
5518 +	raw_spin_unlock(&_global_env.lock);
5519 +	raw_spin_unlock(&state->lock);
5520 +		
5521 +	if (update <= now || reschedule[state->cpu]) {
5522 +		reschedule[state->cpu] = 0;
5523 +		litmus_reschedule(state->cpu);
5524 +	} else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
5525 +		/* Reprogram only if not already set correctly. */
5526 +		if (!hrtimer_active(&state->timer) ||
5527 +		    ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
5528 +			TRACE("canceling timer...at %llu\n", 
5529 +			      ktime_to_ns(hrtimer_get_expires(&state->timer)));
5530 +			hrtimer_cancel(&state->timer);
5531 +			TRACE("setting scheduler timer for %llu\n", update);
5532 +			/* We cannot use hrtimer_start() here because the
5533 +			 * wakeup flag must be set to zero. */
5534 +			__hrtimer_start_range_ns(&state->timer,
5535 +					ns_to_ktime(update),
5536 +					0 /* timer coalescing slack */,
5537 +					HRTIMER_MODE_ABS_PINNED,
5538 +					0 /* wakeup */);
5539 +			if (update < litmus_clock()) {
5540 +				/* uh oh, timer expired while trying to set it */
5541 +				TRACE("timer expired during setting "
5542 +				      "update:%llu now:%llu actual:%llu\n",
5543 +				      update, now, litmus_clock());
5544 +	 			/* The timer HW may not have been reprogrammed
5545 +	 			 * correctly; force rescheduling now. */
5546 +				litmus_reschedule(state->cpu);
5547 +			}
5548 +		}
5549 +	} else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
5550 +		/* Poke remote core only if timer needs to be set earlier than
5551 +		 * it is currently set.
5552 +		 */
5553 +		TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
5554 +		      "active:%d, set:%llu)\n",
5555 +			state->cpu,
5556 +			update,
5557 +			hrtimer_active(&state->timer),
5558 +			ktime_to_ns(hrtimer_get_expires(&state->timer)));
5559 +		if (!hrtimer_active(&state->timer) ||
5560 +		    ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
5561 +			TRACE("poking CPU %d so that it can update its "
5562 +			       "scheduling timer (active:%d, set:%llu)\n",
5563 +			       state->cpu,
5564 +			       hrtimer_active(&state->timer),
5565 +			       ktime_to_ns(hrtimer_get_expires(&state->timer)));
5566 +			raw_spin_lock(&state->lock);
5567 +			preempt_if_preemptable(state->scheduled, state->cpu);
5568 +			raw_spin_unlock(&state->lock);
5569 +			reschedule[state->cpu] = 0;
5570 +		}
5571 +	}
5572 +	for (cpus = 0; cpus<NR_CPUS; cpus++) {
5573 +		if (reschedule[cpus]) {
5574 +			litmus_reschedule(cpus);
5575 +		}
5576 +	}
5577 +}
5578 +
5579 +/* update_cpu_prio - Update cpu's priority
5580 + *                   When a cpu picks a new task, call this function
5581 + *                   to update cpu priorities.
5582 + */
5583 +static void update_cpu_prio(struct mc2_cpu_state *state)
5584 +{
5585 +	struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
5586 +	enum crit_level lv = get_task_crit_level(state->scheduled);
5587 +	
5588 +	if (!state->scheduled) {
5589 +		/* cpu is idle. */
5590 +		ce->scheduled = NULL;
5591 +		ce->deadline = ULLONG_MAX;
5592 +		ce->lv = NUM_CRIT_LEVELS;
5593 +	} else if (lv == CRIT_LEVEL_C) {
5594 +		ce->scheduled = state->scheduled;
5595 +		ce->deadline = get_deadline(state->scheduled);
5596 +		ce->lv = lv;
5597 +	} else if (lv < CRIT_LEVEL_C) {
5598 +		/* If cpu is running level A or B tasks, it is not eligible
5599 +		   to run level-C tasks */
5600 +		ce->scheduled = state->scheduled;
5601 +		ce->deadline = 0;
5602 +		ce->lv = lv;
5603 +	}
5604 +}
5605 +
5606 +/* on_scheduling_timer - timer event for partitioned tasks
5607 + */                       
5608 +static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
5609 +{
5610 +	unsigned long flags;
5611 +	enum hrtimer_restart restart = HRTIMER_NORESTART;
5612 +	struct mc2_cpu_state *state;
5613 +	lt_t update, now;
5614 +	int global_schedule_now;
5615 +	int reschedule[NR_CPUS];
5616 +	int cpus;
5617 +	
5618 +	for (cpus = 0; cpus<NR_CPUS; cpus++)
5619 +		reschedule[cpus] = 0;
5620 +	
5621 +	state = container_of(timer, struct mc2_cpu_state, timer);
5622 +
5623 +	/* The scheduling timer should only fire on the local CPU, because
5624 +	 * otherwise deadlocks via timer_cancel() are possible.
5625 +	 * Note: this does not interfere with dedicated interrupt handling, as
5626 +	 * even under dedicated interrupt handling scheduling timers for
5627 +	 * budget enforcement must occur locally on each CPU.
5628 +	 */
5629 +	BUG_ON(state->cpu != raw_smp_processor_id());
5630 +
5631 +	TS_ISR_START;
5632 +	
5633 +	TRACE("Timer fired at %llu\n", litmus_clock());
5634 +	raw_spin_lock_irqsave(&state->lock, flags);
5635 +	now = litmus_clock();
5636 +	sup_update_time(&state->sup_env, now);
5637 +
5638 +	update = state->sup_env.next_scheduler_update;
5639 +	now = state->sup_env.env.current_time;
5640 +
5641 +	if (update <= now) {
5642 +		litmus_reschedule_local();
5643 +	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
5644 +		hrtimer_set_expires(timer, ns_to_ktime(update));
5645 +		restart = HRTIMER_RESTART;
5646 +	}
5647 +
5648 +	raw_spin_lock(&_global_env.lock);
5649 +	global_schedule_now = gmp_update_time(&_global_env, now);
5650 +	
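+	/* gmp_update_time() returns how many CPUs should reschedule, capped at
+	 * NR_CPUS; the bound of 4 below presumably matches the CPU count of the
+	 * target platform. */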
5651 +	BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
5652 +	
5653 +	/* Find the lowest cpu, and call reschedule */
5654 +	while (global_schedule_now--) {
5655 +		int cpu = get_lowest_prio_cpu(0);
5656 +		if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
5657 +			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5658 +			TRACE("LOWEST CPU = P%d\n", cpu);
5659 +			if (cpu == state->cpu && update > now)
5660 +				litmus_reschedule_local();
5661 +			else
5662 +				reschedule[cpu] = 1;
5663 +		}
5664 +	} 
5665 +	raw_spin_unlock(&_global_env.lock);
5666 +	
5667 +	raw_spin_unlock_irqrestore(&state->lock, flags);
5668 +	
5669 +	TS_ISR_END;
5670 +	
5671 +	for (cpus = 0; cpus<NR_CPUS; cpus++) {
5672 +		if (reschedule[cpus]) {
5673 +			litmus_reschedule(cpus);
5674 +		}
5675 +	}
5676 +	
5677 +	
5678 +	return restart;
5679 +}
5680 +
5681 +/* mc2_complete_job - syscall backend for job completions
5682 + */
5683 +static long mc2_complete_job(void)
5684 +{
5685 +	ktime_t next_release;
5686 +	long err;
5687 +
5688 +	tsk_rt(current)->completed = 1;
5689 +	
5690 +	/* If this is the first job instance, we need to reset the replenishment
5691 +	   time to the next release time */
5692 +	if (tsk_rt(current)->sporadic_release) {
5693 +		struct mc2_cpu_state *state;
5694 +		struct reservation_environment *env;
5695 +		struct mc2_task_state *tinfo;
5696 +		struct reservation *res = NULL;
5697 +		unsigned long flags;
5698 +		enum crit_level lv;
5699 +
5700 +		preempt_disable();
5701 +		local_irq_save(flags);
5702 +		
5703 +		tinfo = get_mc2_state(current);
5704 +		lv = get_task_crit_level(current);
5705 +		
5706 +		if (lv < CRIT_LEVEL_C) {
5707 +			state = cpu_state_for(tinfo->cpu);
5708 +			raw_spin_lock(&state->lock);
5709 +			env = &(state->sup_env.env);
5710 +			res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
5711 +			env->time_zero = tsk_rt(current)->sporadic_release_time;
5712 +		}
5713 +		else if (lv == CRIT_LEVEL_C) {
5714 +			state = local_cpu_state();		
5715 +			raw_spin_lock(&state->lock);
5716 +			raw_spin_lock(&_global_env.lock);
5717 +			res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
5718 +			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
5719 +		}
5720 +		else
5721 +			BUG();
5722 +
5723 +		/* set next_replenishment to the synchronous release time */
5724 +		BUG_ON(!res);
5725 +		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
5726 +		res->cur_budget = 0;
5727 +		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
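+		/* Marking the reservation DEPLETED queues it for replenishment at
+		 * the synchronous release time set above. */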
5728 +
5729 +		if (lv == CRIT_LEVEL_C)
5730 +			raw_spin_unlock(&_global_env.lock);
5731 +		
5732 +		raw_spin_unlock(&state->lock);
5733 +		local_irq_restore(flags);
5734 +		preempt_enable();
5735 +	}
5736 +	
5737 +	sched_trace_task_completion(current, 0);		
5738 +	/* update the next release time and deadline */
5739 +	prepare_for_next_period(current);
5740 +	sched_trace_task_release(current);
5741 +	next_release = ns_to_ktime(get_release(current));
5742 +	preempt_disable();
5743 +	TRACE_CUR("next_release=%llu\n", get_release(current));
5744 +	if (get_release(current) > litmus_clock()) {
5745 +		/* sleep until next_release */
5746 +		set_current_state(TASK_INTERRUPTIBLE);
5747 +		preempt_enable_no_resched();
5748 +		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
5749 +	} else {
5750 +		/* release the next job immediately */
5751 +		err = 0;
5752 +		TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
5753 +		preempt_enable();
5754 +	}
5755 +
5756 +	TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
5757 +
5758 +	tsk_rt(current)->completed = 0;
5759 +	return err;
5760 +}
5761 +
5762 +/* mc2_dispatch - Select the next task to schedule.
5763 + */
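+/* Walks sup_env->active_reservations and returns the first client dispatched
+ * by an ACTIVE reservation; the next scheduler update is set to the
+ * reservation's remaining budget via sup_scheduler_update_after(). */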
5764 +struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state)
5765 +{
5766 +	struct reservation *res, *next;
5767 +	struct task_struct *tsk = NULL;
5768 +	struct crit_entry *ce;
5769 +	enum crit_level lv;
5770 +	lt_t time_slice;
5771 +
5772 +	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
5773 +		if (res->state == RESERVATION_ACTIVE) {
5774 +			tsk = res->ops->dispatch_client(res, &time_slice);
5775 +			if (likely(tsk)) {
5776 +				lv = get_task_crit_level(tsk);
5777 +				if (lv == NUM_CRIT_LEVELS) {
5778 +					sup_scheduler_update_after(sup_env, res->cur_budget);
5779 +					return tsk;
5780 +				} else {
5781 +					ce = &state->crit_entries[lv];
5782 +					sup_scheduler_update_after(sup_env, res->cur_budget);
5783 +					res->blocked_by_ghost = 0;
5784 +					res->is_ghost = NO_CPU;
5785 +					return tsk;
5786 +				}
5787 +			}
5788 +		}
5789 +	}
5790 +	
5791 +	return NULL;
5792 +}
5793 +
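+/* mc2_global_dispatch - pick the next level-C task from the global
+ * environment; only reservations not already scheduled on another CPU are
+ * considered, and the chosen one is bound to this CPU. */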
5794 +struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
5795 +{
5796 +	struct reservation *res, *next;
5797 +	struct task_struct *tsk = NULL;
5798 +
5799 +	enum crit_level lv;
5800 +	lt_t time_slice;
5801 +	
5802 +	list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
5803 +		BUG_ON(!res);
5804 +		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
5805 +			tsk = res->ops->dispatch_client(res, &time_slice);
5806 +			if (likely(tsk)) {
5807 +				lv = get_task_crit_level(tsk);
5808 +				if (lv == NUM_CRIT_LEVELS) {
5809 +#if BUDGET_ENFORCEMENT_AT_C			
5810 +					gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
5811 +#endif
5812 +					res->event_added = 1;
5813 +					res->blocked_by_ghost = 0;
5814 +					res->is_ghost = NO_CPU;
5815 +					res->scheduled_on = state->cpu;
5816 +					return tsk;
5817 +				} else if (lv == CRIT_LEVEL_C) {
5818 +#if BUDGET_ENFORCEMENT_AT_C
5819 +						gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
5820 +#endif
5821 +						res->event_added = 1;
5822 +						res->blocked_by_ghost = 0;
5823 +						res->is_ghost = NO_CPU;
5824 +						res->scheduled_on = state->cpu;
5825 +						return tsk;
5826 +				} else {
5827 +					BUG();
5828 +				}
5829 +			}
5830 +		}
5831 +	}
5832 +	
5833 +	return NULL;
5834 +}
5835 +
5836 +static inline void pre_schedule(struct task_struct *prev, int cpu)
5837 +{
5838 +	TS_SCHED_A_START;
5839 +	TS_SCHED_C_START;
5840 +	
5841 +	if (!prev || !is_realtime(prev))
5842 +		return;
5843 +	
5844 +	do_partition(CRIT_LEVEL_C, cpu);
5845 +}
5846 +
5847 +static inline void post_schedule(struct task_struct *next, int cpu)
5848 +{
5849 +	enum crit_level lev;
5850 +	if ((!next) || !is_realtime(next))
5851 +		return;
5852 +
5853 +	lev = get_task_crit_level(next);
5854 +	do_partition(lev, cpu);
5855 +	
5856 +	switch(lev) {
5857 +		case CRIT_LEVEL_A:
5858 +		case CRIT_LEVEL_B:
5859 +			TS_SCHED_A_END(next);
5860 +			break;
5861 +		case CRIT_LEVEL_C:
5862 +			TS_SCHED_C_END(next);
5863 +			break;
5864 +		default:
5865 +			break;
5866 +	}
5867 +	
5868 +}
5869 +
5870 +/* mc2_schedule - main scheduler function. pick the next task to run
5871 + */
5872 +static struct task_struct* mc2_schedule(struct task_struct * prev)
5873 +{
5874 +	int np, blocks, exists, preempt, to_schedule = 0;
5875 +	/* next == NULL means "schedule background work". */
5876 +	lt_t now;
5877 +	struct mc2_cpu_state *state = local_cpu_state();
5878 +	
5879 +	pre_schedule(prev, state->cpu);
5880 +	
5881 +	raw_spin_lock(&state->lock);
5882 +	
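+	/* Lock order used throughout the plugin: when both locks are needed,
+	 * state->lock is acquired first and _global_env.lock nests inside it. */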
5883 +	if (state->scheduled && state->scheduled != prev)
5884 +		printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
5885 +	if (state->scheduled && !is_realtime(prev))
5886 +		printk(KERN_ALERT "BUG2!!!!!!!! \n");
5887 +
5888 +	/* (0) Determine state */
5889 +	exists = state->scheduled != NULL;
5890 +	blocks = exists && !is_current_running();
5891 +	np = exists && is_np(state->scheduled);
5892 +	
5893 +	raw_spin_lock(&_global_env.lock);
5894 +	preempt = resched_cpu[state->cpu];
5895 +	resched_cpu[state->cpu] = 0;
5896 +	raw_spin_unlock(&_global_env.lock);
5897 +
5898 +	/* update time */
5899 +	state->sup_env.will_schedule = true;
5900 +
5901 +	now = litmus_clock();
5902 +	sup_update_time(&state->sup_env, now);
5903 +
5904 +	if (is_realtime(current) && blocks) {
5905 +		if (get_task_crit_level(current) == CRIT_LEVEL_C)
5906 +			raw_spin_lock(&_global_env.lock);
5907 +		task_departs(current, is_completed(current));
5908 +		if (get_task_crit_level(current) == CRIT_LEVEL_C)
5909 +			raw_spin_unlock(&_global_env.lock);
5910 +	}
5911 +	
5912 +	/* figure out what to schedule next */
5913 +	if (!np)
5914 +		state->scheduled = mc2_dispatch(&state->sup_env, state);
5915 +
5916 +	if (!state->scheduled) {
5917 +		raw_spin_lock(&_global_env.lock);
5918 +		to_schedule = gmp_update_time(&_global_env, now);
5919 +		state->scheduled = mc2_global_dispatch(state);
5920 +		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
5921 +		update_cpu_prio(state);
5922 +		raw_spin_unlock(&_global_env.lock);
5923 +	} else {
5924 +		raw_spin_lock(&_global_env.lock);
5925 +		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
5926 +		update_cpu_prio(state);
5927 +		raw_spin_unlock(&_global_env.lock);
5928 +	}
5929 +	
5930 +	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
5931 +	sched_state_task_picked();
5932 +
5933 +	/* program scheduler timer */
5934 +	state->sup_env.will_schedule = false;
5935 +		
5936 +	/* NOTE: drops state->lock */
5937 +	mc2_update_timer_and_unlock(state);
5938 +
5939 +	if (prev != state->scheduled && is_realtime(prev)) {
5940 +		struct mc2_task_state* tinfo = get_mc2_state(prev);
5941 +		struct reservation* res = tinfo->res_info.client.reservation;
5942 +		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
5943 +		res->scheduled_on = NO_CPU;
5944 +		TRACE_TASK(prev, "descheduled.\n");
5945 +		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
5946 +		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
5947 +			int cpu;
5948 +			raw_spin_lock(&_global_env.lock);
5949 +			cpu = get_lowest_prio_cpu(res?res->priority:0);
5950 +			TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
5951 +			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
5952 +				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5953 +				resched_cpu[cpu] = 1;
5954 +			}
5955 +			raw_spin_unlock(&_global_env.lock);
5956 +		}
5957 +	}
5958 +	
5959 +	if (to_schedule != 0) {
5960 +		raw_spin_lock(&_global_env.lock);
5961 +		while (to_schedule--) {
5962 +			int cpu = get_lowest_prio_cpu(0);
5963 +			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
5964 +				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5965 +				resched_cpu[cpu] = 1;
5966 +			}
5967 +		}
5968 +		raw_spin_unlock(&_global_env.lock);	
5969 +	}
5970 +
5971 +	if (state->scheduled) {
5972 +		TRACE_TASK(state->scheduled, "scheduled.\n");
5973 +	}
5974 +	
5975 +	post_schedule(state->scheduled, state->cpu);
5976 +	
5977 +	return state->scheduled;
5978 +}
5979 +
5980 +/* mc2_task_resume - Called when the state of tsk changes back to 
5981 + *                   TASK_RUNNING. We need to requeue the task.
5982 + */
5983 +static void mc2_task_resume(struct task_struct  *tsk)
5984 +{
5985 +	unsigned long flags;
5986 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
5987 +	struct mc2_cpu_state *state;
5988 +
5989 +	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
5990 +
5991 +	local_irq_save(flags);
5992 +	if (tinfo->cpu != -1)
5993 +		state = cpu_state_for(tinfo->cpu);
5994 +	else
5995 +		state = local_cpu_state();
5996 +
5997 +	/* Requeue only if self-suspension was already processed. */
5998 +	if (tinfo->has_departed)
5999 +	{
6000 +		/* We don't want to consider jobs before synchronous releases */
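+		/* (the first few jobs, job_no <= 5, are skipped so that early,
+		 *  possibly synchronous, releases do not distort the
+		 *  release-latency timestamps recorded below) */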
6001 +		if (tsk_rt(tsk)->job_params.job_no > 5) {
6002 +			switch(get_task_crit_level(tsk)) {
6003 +				case CRIT_LEVEL_A:
6004 +					TS_RELEASE_LATENCY_A(get_release(tsk));
6005 +					break;
6006 +				case CRIT_LEVEL_B:
6007 +					TS_RELEASE_LATENCY_B(get_release(tsk));
6008 +					break;
6009 +				case CRIT_LEVEL_C:
6010 +					TS_RELEASE_LATENCY_C(get_release(tsk));
6011 +					break;
6012 +				default:
6013 +					break;
6014 +			}
6015 +		}
6016 +
6017 +		raw_spin_lock(&state->lock);
6018 +		/* Assumption: litmus_clock() is synchronized across cores,
6019 +		 * since we might not actually be executing on tinfo->cpu
6020 +		 * at the moment. */
6021 +		if (tinfo->cpu != -1) {
6022 +			sup_update_time(&state->sup_env, litmus_clock());
6023 +			task_arrives(state, tsk);
6024 +		} else {
6025 +			raw_spin_lock(&_global_env.lock);
6026 +			gmp_update_time(&_global_env, litmus_clock());
6027 +			task_arrives(state, tsk);
6028 +			raw_spin_unlock(&_global_env.lock);
6029 +		}
6030 +			
6031 +		/* NOTE: drops state->lock */
6032 +		TRACE_TASK(tsk, "mc2_resume()\n");
6033 +		mc2_update_timer_and_unlock(state);	
6034 +	} else {
6035 +		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
6036 +	}
6037 +
6038 +	local_irq_restore(flags);
6039 +}
6040 +
6041 +
6042 +/* mc2_admit_task - Set up MC2 task parameters
6043 + */
6044 +static long mc2_admit_task(struct task_struct *tsk)
6045 +{
6046 +	long err = -ESRCH;
6047 +	unsigned long flags;
6048 +	struct reservation *res;
6049 +	struct mc2_cpu_state *state;
6050 +	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
6051 +	struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
6052 +	enum crit_level lv;
6053 +	
6054 +	if (!tinfo)
6055 +		return -ENOMEM;
6056 +
6057 +	if (!mp) {
6058 +		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
+		kfree(tinfo);
6059 +		return err;
6060 +	}
6061 +	
6062 +	lv = mp->crit;
6063 +	preempt_disable();
6064 +
6065 +	if (lv < CRIT_LEVEL_C) {
6066 +		state = cpu_state_for(task_cpu(tsk));
6067 +		raw_spin_lock_irqsave(&state->lock, flags);
6068 +
6069 +		res = sup_find_by_id(&state->sup_env, mp->res_id);
6070 +
6071 +		/* found the appropriate reservation */
6072 +		if (res) {
6073 +			TRACE_TASK(tsk, "SUP FOUND RES ID\n");
6074 +			tinfo->mc2_param.crit = mp->crit;
6075 +			tinfo->mc2_param.res_id = mp->res_id;
6076 +		
6077 +			/* initial values */
6078 +			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
6079 +			tinfo->cpu = task_cpu(tsk);
6080 +			tinfo->has_departed = true;
6081 +			tsk_rt(tsk)->plugin_state = tinfo;
6082 +
6083 +			/* disable LITMUS^RT's per-thread budget enforcement */
6084 +			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
6085 +		}
6086 +
6087 +		raw_spin_unlock_irqrestore(&state->lock, flags);
6088 +	} else if (lv == CRIT_LEVEL_C) {
6089 +		state = local_cpu_state();
6090 +		raw_spin_lock_irqsave(&state->lock, flags);
6091 +		raw_spin_lock(&_global_env.lock);
6092 +		
6093 +		res = gmp_find_by_id(&_global_env, mp->res_id);
6094 +
6095 +		/* found the appropriate reservation (or vCPU) */
6096 +		if (res) {
6097 +			TRACE_TASK(tsk, "GMP FOUND RES ID\n");
6098 +			tinfo->mc2_param.crit = mp->crit;
6099 +			tinfo->mc2_param.res_id = mp->res_id;
6100 +			
6101 +			/* initial values */
6102 +			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
6103 +			tinfo->cpu = -1;
6104 +			tinfo->has_departed = true;
6105 +			tsk_rt(tsk)->plugin_state = tinfo;
6106 +
6107 +			/* disable LITMUS^RT's per-thread budget enforcement */
6108 +			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
6109 +		}
6110 +
6111 +		raw_spin_unlock(&_global_env.lock);
6112 +		raw_spin_unlock_irqrestore(&state->lock, flags);	
6113 +	}
6114 +	
6115 +	preempt_enable();
6116 +
6117 +	if (err)
6118 +		kfree(tinfo);
6119 +
6120 +	return err;
6121 +}
6122 +
6123 +/* mc2_task_new - A new real-time task has arrived. Release its first job
6124 + *                at the next reservation replenishment time
6125 + */
6126 +static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
6127 +			  int is_running)
6128 +{
6129 +	unsigned long flags;
6130 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
6131 +	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
6132 +	struct reservation *res;
6133 +	enum crit_level lv = get_task_crit_level(tsk);
6134 +	lt_t release = 0;
6135 +
6136 +	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
6137 +		   litmus_clock(), on_runqueue, is_running);
6138 +
6139 +	if (tinfo->cpu == -1)
6140 +		state = local_cpu_state();
6141 +	else 
6142 +		state = cpu_state_for(tinfo->cpu);
6143 +
6144 +	
6145 +	if (is_running) {
6146 +		state->scheduled = tsk;
6147 +		/* make sure this task should actually be running */
6148 +		litmus_reschedule_local();
6149 +	}
6150 +
6151 +	/* acquire the lock protecting the state and disable interrupts */
6152 +	local_irq_save(flags);
6153 +	raw_spin_lock(&state->lock);
6154 +
6155 +	if (lv == CRIT_LEVEL_C) {
6156 +		raw_spin_lock(&_global_env.lock);
6157 +		res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
6158 +	}
6159 +	else {
6160 +		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
6161 +	}
6162 +	release = res->next_replenishment;
6163 +	
6164 +	if (on_runqueue || is_running) {
6165 +		/* Assumption: litmus_clock() is synchronized across cores
6166 +		 * [see comment in pres_task_resume()] */
6167 +		if (lv == CRIT_LEVEL_C) {
6168 +			gmp_update_time(&_global_env, litmus_clock());
6169 +		}
6170 +		else
6171 +			sup_update_time(&state->sup_env, litmus_clock());
6172 +		task_arrives(state, tsk);
6173 +		if (lv == CRIT_LEVEL_C)
6174 +			raw_spin_unlock(&_global_env.lock);
6175 +		/* NOTE: drops state->lock */
6176 +		TRACE("mc2_new()\n");
6177 +		
6178 +		mc2_update_timer_and_unlock(state);
6179 +	} else {
6180 +		if (lv == CRIT_LEVEL_C)
6181 +			raw_spin_unlock(&_global_env.lock);
6182 +		raw_spin_unlock(&state->lock);
6183 +	}
6184 +	local_irq_restore(flags);
6185 +	
6186 +	if (release) {
6187 +		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
6188 +	}
6189 +	else
6190 +		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
6191 +}
6192 +
6193 +/* mc2_reservation_destroy - reservation_destroy system call backend
6194 + */
6195 +static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
6196 +{
6197 +	long ret = -EINVAL;
6198 +	struct mc2_cpu_state *state;
6199 +	struct reservation *res = NULL, *next;
6200 +	struct sup_reservation_environment *sup_env;
6201 +	int found = 0;
6202 +	unsigned long flags;
6203 +	
6204 +	if (cpu == -1) {
6205 +		/* if the reservation is global reservation */
6206 +		local_irq_save(flags);
6207 +		raw_spin_lock(&_global_env.lock);
6208 +		
6209 +		list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
6210 +			if (res->id == reservation_id) {
6211 +				list_del(&res->list);
6212 +				kfree(res);
6213 +				found = 1;
6214 +				ret = 0;
6215 +			}
6216 +		}
6217 +		if (!found) {
6218 +			list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) {
6219 +				if (res->id == reservation_id) {
6220 +					list_del(&res->list);
6221 +					kfree(res);
6222 +					found = 1;
6223 +					ret = 0;
6224 +				}
6225 +			}
6226 +		}
6227 +		if (!found) {
6228 +			list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
6229 +				if (res->id == reservation_id) {
6230 +					list_del(&res->list);
6231 +					kfree(res);
6232 +					found = 1;
6233 +					ret = 0;
6234 +				}
6235 +			}
6236 +		}
6237 +
6238 +		raw_spin_unlock(&_global_env.lock);
6239 +		local_irq_restore(flags);
6240 +	} else {
6241 +		/* if the reservation is partitioned reservation */
6242 +		state = cpu_state_for(cpu);
6243 +		local_irq_save(flags);
6244 +		raw_spin_lock(&state->lock);
6245 +		
6246 +		sup_env = &state->sup_env;
6247 +		list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
6248 +			if (res->id == reservation_id) {
6249 +				list_del(&res->list);
6250 +				kfree(res);
6251 +				found = 1;
6252 +				ret = 0;
6253 +			}
6254 +		}
6255 +		if (!found) {
6256 +			list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
6257 +				if (res->id == reservation_id) {
6258 +					list_del(&res->list);
6259 +					kfree(res);
6260 +					found = 1;
6261 +					ret = 0;
6262 +				}
6263 +			}
6264 +		}
6265 +		if (!found) {
6266 +			list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
6267 +				if (res->id == reservation_id) {
6268 +					list_del(&res->list);
6269 +					kfree(res);
6270 +					found = 1;
6271 +					ret = 0;
6272 +				}
6273 +			}
6274 +		}
6275 +
6276 +		raw_spin_unlock(&state->lock);
6277 +		local_irq_restore(flags);
6278 +	}
6279 +	
6280 +	TRACE("Reservation destroyed, ret = %ld\n", ret);
6281 +	return ret;
6282 +}
6283 +
6284 +/* mc2_task_exit - Task becomes a normal (non-real-time) task
6285 + */
6286 +static void mc2_task_exit(struct task_struct *tsk)
6287 +{
6288 +	unsigned long flags;
6289 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
6290 +	struct mc2_cpu_state *state;
6291 +	enum crit_level lv = tinfo->mc2_param.crit;
6292 +	struct crit_entry* ce;
6293 +	int cpu;
6294 +
6295 +	local_irq_save(flags);
6296 +	if (tinfo->cpu != -1)
6297 +		state = cpu_state_for(tinfo->cpu);
6298 +	else 
6299 +		state = local_cpu_state();
6300 +	
6301 +	raw_spin_lock(&state->lock);
6302 +	
6303 +	if (state->scheduled == tsk)
6304 +		state->scheduled = NULL;
6305 +
6306 +	ce = &state->crit_entries[lv];
6307 +	if (ce->running == tsk)
6308 +		ce->running = NULL;
6309 +	
6310 +	/* remove from queues */
6311 +	if (is_running(tsk)) {
6312 +		/* Assumption: litmus_clock() is synchronized across cores
6313 +		 * [see comment in pres_task_resume()] */
6314 +		
6315 +		/* update both global and partitioned */
6316 +		if (lv < CRIT_LEVEL_C) {
6317 +			sup_update_time(&state->sup_env, litmus_clock());
6318 +		}
6319 +		else if (lv == CRIT_LEVEL_C) {
6320 +			raw_spin_lock(&_global_env.lock);
6321 +			gmp_update_time(&_global_env, litmus_clock());
6322 +		}
6323 +		task_departs(tsk, 0);
6324 +		if (lv == CRIT_LEVEL_C)
6325 +			raw_spin_unlock(&_global_env.lock);
6326 +		
6327 +		/* NOTE: drops state->lock */
6328 +		TRACE("mc2_exit()\n");
6329 +
6330 +		mc2_update_timer_and_unlock(state);	
6331 +	} else {
6332 +		raw_spin_unlock(&state->lock);
6333 +		
6334 +	}
6335 +
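+	/* A level-C task may have run (or left a ghost entry) on any CPU, so
+	 * clear stale scheduled/ghost references on all remote CPUs as well. */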
6336 +	if (lv == CRIT_LEVEL_C) {
6337 +		for_each_online_cpu(cpu) {
6338 +			state = cpu_state_for(cpu);
6339 +			if (state == local_cpu_state())
6340 +				continue;
6341 +			raw_spin_lock(&state->lock);
6342 +			
6343 +			if (state->scheduled == tsk)
6344 +				state->scheduled = NULL;
6345 +			
6346 +			ce = &state->crit_entries[lv];
6347 +			if (ce->running == tsk)
6348 +				ce->running = NULL;
6349 +			
6350 +			raw_spin_unlock(&state->lock);
6351 +		}
6352 +	}
6353 +	
6354 +	local_irq_restore(flags);
6355 +	
6356 +	kfree(tsk_rt(tsk)->plugin_state);
6357 +	tsk_rt(tsk)->plugin_state = NULL;
6358 +	kfree(tsk_rt(tsk)->mc2_data);
6359 +	tsk_rt(tsk)->mc2_data = NULL;
6360 +}
6361 +
6362 +/* create_polling_reservation - create a new polling reservation
6363 + */
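+/* Returns the reservation ID (>= 0) on success or a negative error code. */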
6364 +static long create_polling_reservation(
6365 +	int res_type,
6366 +	struct reservation_config *config)
6367 +{
6368 +	struct mc2_cpu_state *state;
6369 +	struct reservation* res;
6370 +	struct polling_reservation *pres;
6371 +	unsigned long flags;
6372 +	int use_edf  = config->priority == LITMUS_NO_PRIORITY;
6373 +	int periodic =  res_type == PERIODIC_POLLING;
6374 +	long err = -EINVAL;
6375 +
6376 +	/* sanity checks */
6377 +	if (config->polling_params.budget >
6378 +	    config->polling_params.period) {
6379 +		printk(KERN_ERR "invalid polling reservation (%u): "
6380 +		       "budget > period\n", config->id);
6381 +		return -EINVAL;
6382 +	}
6383 +	if (config->polling_params.budget >
6384 +	    config->polling_params.relative_deadline
6385 +	    && config->polling_params.relative_deadline) {
6386 +		printk(KERN_ERR "invalid polling reservation (%u): "
6387 +		       "budget > deadline\n", config->id);
6388 +		return -EINVAL;
6389 +	}
6390 +	if (config->polling_params.offset >
6391 +	    config->polling_params.period) {
6392 +		printk(KERN_ERR "invalid polling reservation (%u): "
6393 +		       "offset > period\n", config->id);
6394 +		return -EINVAL;
6395 +	}
6396 +
6397 +	/* Allocate before we grab a spin lock.
6398 +	 * Todo: would be nice to use a core-local allocation.
6399 +	 */
6400 +	pres = kzalloc(sizeof(*pres), GFP_KERNEL);
6401 +	if (!pres)
6402 +		return -ENOMEM;
6403 +
6404 +	if (config->cpu != -1) {
6405 +		state = cpu_state_for(config->cpu);
6406 +		raw_spin_lock_irqsave(&state->lock, flags);
6407 +
6408 +		res = sup_find_by_id(&state->sup_env, config->id);
6409 +		if (!res) {
6410 +			polling_reservation_init(pres, use_edf, periodic,
6411 +				config->polling_params.budget,
6412 +				config->polling_params.period,
6413 +				config->polling_params.relative_deadline,
6414 +				config->polling_params.offset);
6415 +			pres->res.id = config->id;
6416 +			pres->res.blocked_by_ghost = 0;
6417 +			pres->res.is_ghost = NO_CPU;
6418 +			if (!use_edf)
6419 +				pres->res.priority = config->priority;
6420 +			sup_add_new_reservation(&state->sup_env, &pres->res);
6421 +			err = config->id;
6422 +			TRACE_CUR("reservation created R%d priority : %llu\n", config->id, pres->res.priority);
6423 +		} else {
6424 +			err = -EEXIST;
6425 +		}
6426 +
6427 +		raw_spin_unlock_irqrestore(&state->lock, flags);
6428 +
6429 +	} else {
6430 +		raw_spin_lock_irqsave(&_global_env.lock, flags);
6431 +		
6432 +		res = gmp_find_by_id(&_global_env, config->id);
6433 +		if (!res) {
6434 +			polling_reservation_init(pres, use_edf, periodic,
6435 +				config->polling_params.budget,
6436 +				config->polling_params.period,
6437 +				config->polling_params.relative_deadline,
6438 +				config->polling_params.offset);
6439 +			pres->res.id = config->id;
6440 +			pres->res.blocked_by_ghost = 0;
6441 +			pres->res.scheduled_on = NO_CPU;
6442 +			pres->res.is_ghost = NO_CPU;
6443 +			if (!use_edf)
6444 +				pres->res.priority = config->priority;
6445 +			gmp_add_new_reservation(&_global_env, &pres->res);
6446 +			err = config->id;
6447 +		} else {
6448 +			err = -EEXIST;
6449 +		}
6450 +		raw_spin_unlock_irqrestore(&_global_env.lock, flags);		
6451 +	}
6452 +	
6453 +	if (err < 0)
6454 +		kfree(pres);
6455 +
6456 +	return err;
6457 +}
6458 +
6459 +#define MAX_INTERVALS 1024
6460 +
6461 +/* create_table_driven_reservation - create a table_driven reservation
6462 + */
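+/* Intervals must be non-empty, non-overlapping, sorted, and fit within the
+ * major cycle; returns the reservation ID on success. */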
6463 +static long create_table_driven_reservation(
6464 +	struct reservation_config *config)
6465 +{
6466 +	struct mc2_cpu_state *state;
6467 +	struct reservation* res;
6468 +	struct table_driven_reservation *td_res = NULL;
6469 +	struct lt_interval *slots = NULL;
6470 +	size_t slots_size;
6471 +	unsigned int i, num_slots;
6472 +	unsigned long flags;
6473 +	long err = -EINVAL;
6474 +
6475 +
6476 +	if (!config->table_driven_params.num_intervals) {
6477 +		printk(KERN_ERR "invalid table-driven reservation (%u): "
6478 +		       "no intervals\n", config->id);
6479 +		return -EINVAL;
6480 +	}
6481 +
6482 +	if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
6483 +		printk(KERN_ERR "invalid table-driven reservation (%u): "
6484 +		       "too many intervals (max: %d)\n", config->id, MAX_INTERVALS);
6485 +		return -EINVAL;
6486 +	}
6487 +
6488 +	num_slots = config->table_driven_params.num_intervals;
6489 +	slots_size = sizeof(slots[0]) * num_slots;
6490 +	slots = kzalloc(slots_size, GFP_KERNEL);
6491 +	if (!slots)
6492 +		return -ENOMEM;
6493 +
6494 +	td_res = kzalloc(sizeof(*td_res), GFP_KERNEL);
6495 +	if (!td_res)
6496 +		err = -ENOMEM;
6497 +	else
6498 +		err = copy_from_user(slots,
6499 +			config->table_driven_params.intervals, slots_size);
6500 +
6501 +	if (!err) {
6502 +		/* sanity checks */
6503 +		for (i = 0; !err && i < num_slots; i++)
6504 +			if (slots[i].end <= slots[i].start) {
6505 +				printk(KERN_ERR
6506 +				       "invalid table-driven reservation (%u): "
6507 +				       "invalid interval %u => [%llu, %llu]\n",
6508 +				       config->id, i,
6509 +				       slots[i].start, slots[i].end);
6510 +				err = -EINVAL;
6511 +			}
6512 +
6513 +		for (i = 0; !err && i + 1 < num_slots; i++)
6514 +			if (slots[i + 1].start <= slots[i].end) {
6515 +				printk(KERN_ERR
6516 +				       "invalid table-driven reservation (%u): "
6517 +				       "overlapping intervals %u, %u\n",
6518 +				       config->id, i, i + 1);
6519 +				err = -EINVAL;
6520 +			}
6521 +
6522 +		if (slots[num_slots - 1].end >
6523 +			config->table_driven_params.major_cycle_length) {
6524 +			printk(KERN_ERR
6525 +				"invalid table-driven reservation (%u): last "
6526 +				"interval ends past major cycle %llu > %llu\n",
6527 +				config->id,
6528 +				slots[num_slots - 1].end,
6529 +				config->table_driven_params.major_cycle_length);
6530 +			err = -EINVAL;
6531 +		}
6532 +	}
6533 +
6534 +	if (!err) {
6535 +		state = cpu_state_for(config->cpu);
6536 +		raw_spin_lock_irqsave(&state->lock, flags);
6537 +
6538 +		res = sup_find_by_id(&state->sup_env, config->id);
6539 +		if (!res) {
6540 +			table_driven_reservation_init(td_res,
6541 +				config->table_driven_params.major_cycle_length,
6542 +				slots, num_slots);
6543 +			td_res->res.id = config->id;
6544 +			td_res->res.priority = config->priority;
6545 +			td_res->res.blocked_by_ghost = 0;
6546 +			sup_add_new_reservation(&state->sup_env, &td_res->res);
6547 +			err = config->id;
6548 +		} else {
6549 +			err = -EEXIST;
6550 +		}
6551 +
6552 +		raw_spin_unlock_irqrestore(&state->lock, flags);
6553 +	}
6554 +
6555 +	if (err < 0) {
6556 +		kfree(slots);
6557 +		kfree(td_res);
6558 +	}
6559 +
6560 +	return err;
6561 +}
6562 +
6563 +/* mc2_reservation_create - reservation_create system call backend
6564 + */
6565 +static long mc2_reservation_create(int res_type, void* __user _config)
6566 +{
6567 +	long ret = -EINVAL;
6568 +	struct reservation_config config;
6569 +
6570 +	TRACE("Attempt to create reservation (%d)\n", res_type);
6571 +
6572 +	if (copy_from_user(&config, _config, sizeof(config)))
6573 +		return -EFAULT;
6574 +
6575 +	if (config.cpu != -1) {
6576 +		if (config.cpu < 0 || !cpu_online(config.cpu)) {
6577 +			printk(KERN_ERR "invalid polling reservation (%u): "
6578 +				   "CPU %d offline\n", config.id, config.cpu);
6579 +			return -EINVAL;
6580 +		}
6581 +	}
6582 +
6583 +	switch (res_type) {
6584 +		case PERIODIC_POLLING:
6585 +		case SPORADIC_POLLING:
6586 +			ret = create_polling_reservation(res_type, &config);
6587 +			break;
6588 +
6589 +		case TABLE_DRIVEN:
6590 +			ret = create_table_driven_reservation(&config);
6591 +			break;
6592 +
6593 +		default:
6594 +			return -EINVAL;
6595 +	}
6596 +
6597 +	return ret;
6598 +}
6599 +
6600 +static struct domain_proc_info mc2_domain_proc_info;
6601 +
6602 +static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
6603 +{
6604 +	*ret = &mc2_domain_proc_info;
6605 +	return 0;
6606 +}
6607 +
6608 +static void mc2_setup_domain_proc(void)
6609 +{
6610 +	int i, cpu;
6611 +	int num_rt_cpus = num_online_cpus();
6612 +
6613 +	struct cd_mapping *cpu_map, *domain_map;
6614 +
6615 +	memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info));
6616 +	init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus);
6617 +	mc2_domain_proc_info.num_cpus = num_rt_cpus;
6618 +	mc2_domain_proc_info.num_domains = num_rt_cpus;
6619 +
6620 +	i = 0;
6621 +	for_each_online_cpu(cpu) {
6622 +		cpu_map = &mc2_domain_proc_info.cpu_to_domains[i];
6623 +		domain_map = &mc2_domain_proc_info.domain_to_cpus[i];
6624 +
6625 +		cpu_map->id = cpu;
6626 +		domain_map->id = i;
6627 +		cpumask_set_cpu(i, cpu_map->mask);
6628 +		cpumask_set_cpu(cpu, domain_map->mask);
6629 +		++i;
6630 +	}
6631 +}
6632 +
6633 +static long mc2_activate_plugin(void)
6634 +{
6635 +	int cpu, lv;
6636 +	struct mc2_cpu_state *state;
6637 +	struct cpu_entry *ce;
6638 +
6639 +	gmp_init(&_global_env);
6640 +	raw_spin_lock_init(&_lowest_prio_cpu.lock);
6641 +	
6642 +	for_each_online_cpu(cpu) {
6643 +		TRACE("Initializing CPU%d...\n", cpu);
6644 +
6645 +		resched_cpu[cpu] = 0;
6646 +		level_a_priorities[cpu] = 0;
6647 +		state = cpu_state_for(cpu);
6648 +		ce = &_lowest_prio_cpu.cpu_entries[cpu];
6649 +		
6650 +		ce->cpu = cpu;
6651 +		ce->scheduled = NULL;
6652 +		ce->deadline = ULLONG_MAX;
6653 +		ce->lv = NUM_CRIT_LEVELS;
6654 +		ce->will_schedule = false;
6655 +
6656 +		raw_spin_lock_init(&state->lock);
6657 +		state->cpu = cpu;
6658 +		state->scheduled = NULL;
6659 +		for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
6660 +			struct crit_entry *cr_entry = &state->crit_entries[lv];
6661 +			cr_entry->level = lv;
6662 +			cr_entry->running = NULL;
6663 +		}
6664 +		sup_init(&state->sup_env);
6665 +
6666 +		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6667 +		state->timer.function = on_scheduling_timer;
6668 +	}
6669 +
6670 +	mc2_setup_domain_proc();
6671 +
6672 +	return 0;
6673 +}
6674 +
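+/* mc2_finish_switch - record the task now running on this CPU; if a level-C
+ * task was switched out, kick any CPUs flagged in resched_cpu[]. */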
6675 +static void mc2_finish_switch(struct task_struct *prev)
6676 +{
6677 +	int cpus;
6678 +	enum crit_level lv = get_task_crit_level(prev);
6679 +	struct mc2_cpu_state *state = local_cpu_state();
6680 +	
6681 +	state->scheduled = is_realtime(current) ? current : NULL;
6682 +	if (lv == CRIT_LEVEL_C) {
6683 +		for (cpus = 0; cpus<NR_CPUS; cpus++) {
6684 +			if (resched_cpu[cpus]) {
6685 +				litmus_reschedule(cpus);
6686 +			}
6687 +		}
6688 +	}
6689 +}
6690 +
6691 +static long mc2_deactivate_plugin(void)
6692 +{
6693 +	int cpu;
6694 +	struct mc2_cpu_state *state;
6695 +	struct reservation *res;
6696 +	struct next_timer_event *event;
6697 +	struct cpu_entry *ce;
6698 +	
6699 +	for_each_online_cpu(cpu) {
6700 +		state = cpu_state_for(cpu);
6701 +		raw_spin_lock(&state->lock);
6702 +
6703 +		hrtimer_cancel(&state->timer);
6704 +
6705 +		ce = &_lowest_prio_cpu.cpu_entries[cpu];
6706 +		
6707 +		ce->cpu = cpu;
6708 +		ce->scheduled = NULL;
6709 +		ce->deadline = ULLONG_MAX;
6710 +		ce->lv = NUM_CRIT_LEVELS;
6711 +		ce->will_schedule = false;
6712 +
6713 +		/* Delete all reservations --- assumes struct reservation
6714 +		 * is prefix of containing struct. */
6715 +
6716 +		while (!list_empty(&state->sup_env.active_reservations)) {
6717 +			res = list_first_entry(
6718 +				&state->sup_env.active_reservations,
6719 +			        struct reservation, list);
6720 +			list_del(&res->list);
6721 +			kfree(res);
6722 +		}
6723 +
6724 +		while (!list_empty(&state->sup_env.inactive_reservations)) {
6725 +			res = list_first_entry(
6726 +				&state->sup_env.inactive_reservations,
6727 +			        struct reservation, list);
6728 +			list_del(&res->list);
6729 +			kfree(res);
6730 +		}
6731 +
6732 +		while (!list_empty(&state->sup_env.depleted_reservations)) {
6733 +			res = list_first_entry(
6734 +				&state->sup_env.depleted_reservations,
6735 +			        struct reservation, list);
6736 +			list_del(&res->list);
6737 +			kfree(res);
6738 +		}
6739 +
6740 +		raw_spin_unlock(&state->lock);
6741 +	}
6742 +
6743 +	raw_spin_lock(&_global_env.lock);
6744 +	
6745 +	while (!list_empty(&_global_env.active_reservations)) {
6746 +		res = list_first_entry(
6747 +			&_global_env.active_reservations,
6748 +				struct reservation, list);
6749 +		list_del(&res->list);
6750 +		kfree(res);
6751 +	}
6752 +
6753 +	while (!list_empty(&_global_env.inactive_reservations)) {
6754 +		res = list_first_entry(
6755 +			&_global_env.inactive_reservations,
6756 +				struct reservation, list);
6757 +		list_del(&res->list);
6758 +		kfree(res);
6759 +	}
6760 +
6761 +	while (!list_empty(&_global_env.depleted_reservations)) {
6762 +		res = list_first_entry(
6763 +			&_global_env.depleted_reservations,
6764 +				struct reservation, list);
6765 +		list_del(&res->list);
6766 +		kfree(res);
6767 +	}
6768 +	
6769 +	while (!list_empty(&_global_env.next_events)) {
6770 +		event = list_first_entry(
6771 +			&_global_env.next_events,
6772 +				struct next_timer_event, list);
6773 +		list_del(&event->list);
6774 +		kfree(event);
6775 +	}
6776 +	
6777 +	raw_spin_unlock(&_global_env.lock);
6778 +	
6779 +	destroy_domain_proc_info(&mc2_domain_proc_info);
6780 +	return 0;
6781 +}
6782 +
6783 +static struct sched_plugin mc2_plugin = {
6784 +	.plugin_name			= "MC2",
6785 +	.schedule				= mc2_schedule,
6786 +	.finish_switch			= mc2_finish_switch,
6787 +	.task_wake_up			= mc2_task_resume,
6788 +	.admit_task				= mc2_admit_task,
6789 +	.task_new				= mc2_task_new,
6790 +	.task_exit				= mc2_task_exit,
6791 +	.complete_job           = mc2_complete_job,
6792 +	.get_domain_proc_info   = mc2_get_domain_proc_info,
6793 +	.activate_plugin		= mc2_activate_plugin,
6794 +	.deactivate_plugin      = mc2_deactivate_plugin,
6795 +	.reservation_create     = mc2_reservation_create,
6796 +	.reservation_destroy	= mc2_reservation_destroy,
6797 +};
6798 +
6799 +static int __init init_mc2(void)
6800 +{
6801 +	return register_sched_plugin(&mc2_plugin);
6802 +}
6803 +
6804 +module_init(init_mc2);
6805 diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
6806 index edd91e9..7b1eba0 100644
6807 --- a/litmus/sched_plugin.c
6808 +++ b/litmus/sched_plugin.c
6809 @@ -13,6 +13,7 @@
6810  #include <litmus/sched_plugin.h>
6811  #include <litmus/preempt.h>
6812  #include <litmus/jobs.h>
6813 +#include <litmus/budget.h>
6814  
6815  /*
6816   * Generic function to trigger preemption on either local or remote cpu
6817 @@ -197,6 +198,9 @@ int register_sched_plugin(struct sched_plugin* plugin)
6818  	if (!plugin->wait_for_release_at)
6819  		plugin->wait_for_release_at = default_wait_for_release_at;
6820  
6821 +	if (!plugin->current_budget)
6822 +		plugin->current_budget = litmus_current_budget;
6823 +
6824  	raw_spin_lock(&sched_plugins_lock);
6825  	list_add(&plugin->list, &sched_plugins);
6826  	raw_spin_unlock(&sched_plugins_lock);
6827 diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
6828 index 2549a3f..216b9f3 100644
6829 --- a/litmus/sched_psn_edf.c
6830 +++ b/litmus/sched_psn_edf.c
6831 @@ -23,6 +23,10 @@
6832  #include <litmus/sched_trace.h>
6833  #include <litmus/trace.h>
6834  
6835 +#ifdef CONFIG_PGMRT_SUPPORT
6836 +#include <litmus/pgm.h>
6837 +#endif
6838 +
6839  /* to set up domain/cpu mappings */
6840  #include <litmus/litmus_proc.h>
6841  
6842 @@ -199,6 +203,62 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
6843  	 */
6844  	resched = preempt;
6845  
6846 +#ifdef CONFIG_PGMRT_SUPPORT
6847 +	if (exists) {
6848 +		if (is_pgm_sending(pedf->scheduled)) {
6849 +			if (!is_pgm_satisfied(pedf->scheduled)) {
6850 +				if (!is_priority_boosted(pedf->scheduled)) {
6851 +					TRACE_TASK(pedf->scheduled, "is sending PGM tokens and needs boosting.\n");
6852 +					BUG_ON(is_pgm_satisfied(pedf->scheduled));
6853 +
6854 +					/* We are either sending tokens or waiting for tokens.
6855 +					   If waiting: Boost priority so we'll be scheduled
6856 +						immediately when needed tokens arrive.
6857 +					   If sending: Boost priority so no one (specifically, our
6858 +						consumers) will preempt us while signalling the token
6859 +						transmission.
6860 +					*/
6861 +					tsk_rt(pedf->scheduled)->priority_boosted = 1;
6862 +					tsk_rt(pedf->scheduled)->boost_start_time = litmus_clock();
6863 +
6864 +					if (likely(!blocks)) {
6865 +						requeue(pedf->scheduled, edf);
6866 +						/* we may regain the processor */
6867 +						if (preempt) {
6868 +							preempt = edf_preemption_needed(edf, prev);
6869 +							if (!preempt) {
6870 +								TRACE_TASK(pedf->scheduled, "blocked preemption by lazy boosting.\n");
6871 +							}
6872 +						}
6873 +					}
6874 +				}
6875 +			}
6876 +			else { /* sending is satisfied */
6877 +				tsk_rt(pedf->scheduled)->ctrl_page->pgm_sending = 0;
6878 +				tsk_rt(pedf->scheduled)->ctrl_page->pgm_satisfied = 0;
6879 +
6880 +				if (is_priority_boosted(pedf->scheduled)) {
6881 +					TRACE_TASK(pedf->scheduled,
6882 +							"is done sending PGM tokens must relinquish boosting.\n");
6883 +					/* clear boosting */
6884 +					tsk_rt(pedf->scheduled)->priority_boosted = 0;
6885 +					if(likely(!blocks)) {
6886 +						/* recheck priority */
6887 +						requeue(pedf->scheduled, edf);
6888 +						/* we may lose the processor */
6889 +						if (!preempt) {
6890 +							preempt = edf_preemption_needed(edf, prev);
6891 +							if (preempt) {
6892 +								TRACE_TASK(pedf->scheduled, "preempted by lazy unboosting.\n");
6893 +							}
6894 +						}
6895 +					}
6896 +				}
6897 +			}
6898 +		}
6899 +	}
6900 +#endif
6901 +	
6902  	/* If a task blocks we have no choice but to reschedule.
6903  	 */
6904  	if (blocks)
6905 @@ -243,7 +303,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
6906  	if (next) {
6907  		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
6908  	} else {
6909 -		TRACE("becoming idle at %llu\n", litmus_clock());
6910 +		; //TRACE("becoming idle at %llu\n", litmus_clock());
6911  	}
6912  
6913  	pedf->scheduled = next;
6914 @@ -644,10 +704,14 @@ static long psnedf_admit_task(struct task_struct* tsk)
6915  	    /* don't allow tasks on release master CPU */
6916  	     && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master
6917  #endif
6918 -		)
6919 +		) {
6920 +		TRACE_TASK(tsk, "admitted\n");
6921  		return 0;
6922 -	else
6923 +	}
6924 +	else {
6925 +		TRACE_TASK(tsk, "not admitted\n");
6926  		return -EINVAL;
6927 +	}
6928  }
6929  
6930  /*	Plugin object	*/
6931 diff --git a/mm/migrate.c b/mm/migrate.c
6932 index f53838f..8dd685b 100644
6933 --- a/mm/migrate.c
6934 +++ b/mm/migrate.c
6935 @@ -38,6 +38,8 @@
6936  #include <linux/balloon_compaction.h>
6937  #include <linux/mmu_notifier.h>
6938  
6939 +#include <litmus/litmus.h> // for TRACE_TASK
6940 +
6941  #include <asm/tlbflush.h>
6942  
6943  #define CREATE_TRACE_POINTS
6944 @@ -391,6 +393,65 @@ int migrate_page_move_mapping(struct address_space *mapping,
6945  }
6946  
6947  /*
6948 + * Replace the page in the mapping.
6949 + *
6950 + * The number of remaining references must be:
6951 + * 1 for anonymous pages without a mapping
6952 + * 2 for pages with a mapping
6953 + * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
6954 + */
6955 +int replicate_page_move_mapping(struct address_space *mapping,
6956 +		struct page *newpage, struct page *page,
6957 +		struct buffer_head *head, enum migrate_mode mode,
6958 +		int extra_count)
6959 +{
6960 +	int prev_count = page_count(page);
6961 +	void **pslot;
6962 +
6963 +	BUG_ON(!mapping);
6964 +
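+	/* Unlike migrate_page_move_mapping(), the old page is not replaced in
+	 * the radix tree; only reference counts and zone statistics are updated
+	 * for the replica. */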
6965 +	spin_lock_irq(&mapping->tree_lock);
6966 +
6967 +	pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
6968 +
6969 +	/*
6970 +	 * Now we know that no one else is looking at the page.
6971 +	 */
6972 +	get_page(newpage);	/* add cache reference */
6973 +	if (PageSwapCache(page)) {
6974 +		SetPageSwapCache(newpage);
6975 +		set_page_private(newpage, page_private(page));
6976 +	}
6977 +
6978 +	/*
6979 +	 * Drop cache reference from old page by unfreezing
6980 +	 * to the previous reference.
6981 +	 * We know this isn't the last reference.
6982 +	 */
6983 +	page_unfreeze_refs(page, prev_count);
6984 +	
6985 +	/*
6986 +	 * If moved to a different zone then also account
6987 +	 * the page for that zone. Other VM counters will be
6988 +	 * taken care of when we establish references to the
6989 +	 * new page and drop references to the old page.
6990 +	 *
6991 +	 * Note that anonymous pages are accounted for
6992 +	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
6993 +	 * are mapped to swap space.
6994 +	 */
6995 +	__dec_zone_page_state(page, NR_FILE_PAGES);
6996 +	__inc_zone_page_state(newpage, NR_FILE_PAGES);
6997 +	if (!PageSwapCache(page) && PageSwapBacked(page)) {
6998 +		__dec_zone_page_state(page, NR_SHMEM);
6999 +		__inc_zone_page_state(newpage, NR_SHMEM);
7000 +	}
7001 +	spin_unlock_irq(&mapping->tree_lock);
7002 +
7003 +	return MIGRATEPAGE_SUCCESS;
7004 +}
7005 +
7006 +/*
7007   * The expected number of remaining references is the same as that
7008   * of migrate_page_move_mapping().
7009   */
7010 @@ -550,6 +611,62 @@ void migrate_page_copy(struct page *newpage, struct page *page)
7011  		end_page_writeback(newpage);
7012  }
7013  
7014 +/*
7015 + * Copy the page to its new location
7016 + */
7017 +void replicate_page_copy(struct page *newpage, struct page *page)
7018 +{
7019 +	if (PageHuge(page) || PageTransHuge(page))
7020 +		copy_huge_page(newpage, page);
7021 +	else
7022 +		copy_highpage(newpage, page);
7023 +
7024 +	if (PageError(page))
7025 +		SetPageError(newpage);
7026 +	if (PageReferenced(page))
7027 +		SetPageReferenced(newpage);
7028 +	if (PageUptodate(page))
7029 +		SetPageUptodate(newpage);
7030 +	if (PageActive(page)) {
7031 +		VM_BUG_ON_PAGE(PageUnevictable(page), page);
7032 +		SetPageActive(newpage);
7033 +	} else if (PageUnevictable(page))
7034 +		SetPageUnevictable(newpage);
7035 +	if (PageChecked(page))
7036 +		SetPageChecked(newpage);
7037 +	if (PageMappedToDisk(page))
7038 +		SetPageMappedToDisk(newpage);
7039 +
7040 +	if (PageDirty(page)) {
7041 +		BUG();
7042 + 	}
7043 +
7044 +	/*
7045 +	 * Copy NUMA information to the new page, to prevent over-eager
7046 +	 * future migrations of this same page.
7047 +	 */
7048 +#ifdef CONFIG_NUMA_BALANCING
7049 +	BUG();
7050 +#endif
7051 +
7052 +	if (PageMlocked(page)) {
7053 +		unsigned long flags;
7054 +		int nr_pages = hpage_nr_pages(page);
7055 +		
7056 +		local_irq_save(flags);
7057 +		SetPageMlocked(newpage);
7058 +		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
7059 +		local_irq_restore(flags);
7060 +	}
7061 +
7062 +	/*
7063 +	 * If any waiters have accumulated on the new page then
7064 +	 * wake them up.
7065 +	 */
7066 +	if (PageWriteback(newpage))
7067 +		end_page_writeback(newpage);
7068 +}
7069 +
7070  /************************************************************
7071   *                    Migration functions
7072   ***********************************************************/
7073 @@ -578,6 +695,23 @@ int migrate_page(struct address_space *mapping,
7074  }
7075  EXPORT_SYMBOL(migrate_page);
7076  
7077 +int replicate_page(struct address_space *mapping,
7078 +		struct page *newpage, struct page *page,
7079 +		enum migrate_mode mode, int has_replica)
7080 +{
7081 +	int rc, extra_count = 0;
7082 +
7083 +	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
7084 +
7085 +	rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, extra_count);
7086 +	if (rc != MIGRATEPAGE_SUCCESS)
7087 +		return rc;
7088 +
7089 +	if (has_replica == 0)
7090 +		replicate_page_copy(newpage, page);
7091 +	return MIGRATEPAGE_SUCCESS;
7092 +}
7093 +
7094  #ifdef CONFIG_BLOCK
7095  /*
7096   * Migration function for pages with buffers. This function can only be used
7097 @@ -638,6 +772,8 @@ int buffer_migrate_page(struct address_space *mapping,
7098  EXPORT_SYMBOL(buffer_migrate_page);
7099  #endif
7100  
7101 +extern struct list_head shared_lib_pages;
7102 +
7103  /*
7104   * Writeback a page to clean the dirty state
7105   */
7106 @@ -763,6 +899,64 @@ static int move_to_new_page(struct page *newpage, struct page *page,
7107  	return rc;
7108  }
7109  
7110 +/*
7111 + * Copy a page to a newly allocated page
7112 + * The page is locked and all ptes have been successfully removed.
7113 + *
7114 + * The new page will have replaced the old page if this function
7115 + * is successful.
7116 + *
7117 + * Return value:
7118 + *   < 0 - error code
7119 + *  MIGRATEPAGE_SUCCESS - success
7120 + */
7121 +static int copy_to_new_page(struct page *newpage, struct page *page,
7122 +				int page_was_mapped, enum migrate_mode mode,
7123 +				int has_replica)
7124 +{
7125 +	struct address_space *mapping;
7126 +	int rc;
7127 +
7128 +	/*
7129 +	 * Block others from accessing the page when we get around to
7130 +	 * establishing additional references. We are the only one
7131 +	 * holding a reference to the new page at this point.
7132 +	 */
7133 +	if (!trylock_page(newpage))
7134 +		BUG();
7135 +
7136 +	/* Prepare mapping for the new page.*/
7137 +	newpage->index = page->index;
7138 +	newpage->mapping = page->mapping;
7139 +	if (PageSwapBacked(page))
7140 +		SetPageSwapBacked(newpage);
7141 +
7142 +	mapping = page_mapping(page);
7143 +	if (!mapping) {
7144 +		/* a shared library page must have a mapping. */
7145 +		BUG();
7146 +	}
7147 +	else if (mapping->a_ops->migratepage) {
7148 +		rc = replicate_page(mapping, newpage, page, mode, has_replica);
7149 +	}
7150 +	else {
7151 +		rc = fallback_migrate_page(mapping, newpage, page, mode);
7152 +	}
7153 +
7154 +	if (rc != MIGRATEPAGE_SUCCESS) {
7155 +		newpage->mapping = NULL;
7156 +	} else {
7157 +		if (page_was_mapped) {
7158 +			remove_migration_ptes(page, newpage);
7159 +		}
7160 +	}
7161 +
7162 +	unlock_page(newpage);
7163 +
7164 +	return rc;
7165 +}
7166 +
7167 +
7168  static int __unmap_and_move(struct page *page, struct page *newpage,
7169  				int force, enum migrate_mode mode)
7170  {
7171 @@ -901,6 +1095,106 @@ out:
7172  	return rc;
7173  }
7174  
7175 +static int __unmap_and_copy(struct page *page, struct page *newpage,
7176 +				int force, enum migrate_mode mode, int has_replica)
7177 +{
7178 +	int rc = -EAGAIN;
7179 +	int ttu_ret = SWAP_AGAIN;
7180 +	int page_was_mapped = 0;
7181 +	struct anon_vma *anon_vma = NULL;
7182 +
7183 +	if (!trylock_page(page)) {
7184 +		if (!force || mode == MIGRATE_ASYNC)
7185 +			goto out;
7186 +
7187 +		/*
7188 +		 * It's not safe for direct compaction to call lock_page.
7189 +		 * For example, during page readahead pages are added locked
7190 +		 * to the LRU. Later, when the IO completes the pages are
7191 +		 * marked uptodate and unlocked. However, the queueing
7192 +		 * could be merging multiple pages for one bio (e.g.
7193 +		 * mpage_readpages). If an allocation happens for the
7194 +		 * second or third page, the process can end up locking
7195 +		 * the same page twice and deadlocking. Rather than
7196 +		 * trying to be clever about what pages can be locked,
7197 +		 * avoid the use of lock_page for direct compaction
7198 +		 * altogether.
7199 +		 */
7200 +		if (current->flags & PF_MEMALLOC)
7201 +			goto out;
7202 +
7203 +		lock_page(page);
7204 +	}
7205 +
7206 +	if (PageWriteback(page)) {
7207 +		/*
7208 +		 * Shared library code pages cannot be written, so writeback must never be in progress here.
7209 +		 */
7210 +		BUG();
7211 +	}
7212 +
7213 +	if (PageAnon(page) && !PageKsm(page)) {
7214 +		/* The shared library pages must be backed by a file. */
7215 +		BUG();
7216 +	}
7217 +
7218 +	if (unlikely(isolated_balloon_page(page))) {
7219 +		BUG();
7220 +	}
7221 +
7222 +	/*
7223 +	 * Corner case handling:
7224 +	 * 1. When a new swap-cache page is read into, it is added to the LRU
7225 +	 * and treated as swapcache but it has no rmap yet.
7226 +	 * Calling try_to_unmap() against a page->mapping==NULL page will
7227 +	 * trigger a BUG.  So handle it here.
7228 +	 * 2. An orphaned page (see truncate_complete_page) might have
7229 +	 * fs-private metadata. The page can be picked up due to memory
7230 +	 * offlining.  Everywhere else except page reclaim, the page is
7231 +	 * invisible to the vm, so the page can not be migrated.  So try to
7232 +	 * free the metadata, so the page can be freed.
7233 +	 */
7234 +	if (!page->mapping) {
7235 +		VM_BUG_ON_PAGE(PageAnon(page), page);
7236 +		if (page_has_private(page)) {
7237 +			try_to_free_buffers(page);
7238 +			goto out_unlock;
7239 +		}
7240 +		goto skip_unmap;
7241 +	}
7242 +
7243 +	/* Establish migration ptes or remove ptes */
7244 +	if (page_mapped(page)) {
7245 +		struct rmap_walk_control rwc = {
7246 +			.rmap_one = try_to_unmap_one_only,
7247 +			.arg = (void *)(TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS),
7248 +		};
7249 +		ttu_ret = rmap_walk(page, &rwc);
7250 +		
7251 +		page_was_mapped = 1;
7252 +	}
7253 +
7254 +skip_unmap:
7255 +	if (ttu_ret == SWAP_SUCCESS) {
7256 +		rc = copy_to_new_page(newpage, page, page_was_mapped, mode, has_replica);
7257 +	} else if (ttu_ret == SWAP_AGAIN)
7258 +		printk(KERN_ERR "rmap_walk returned SWAP_AGAIN\n");
7259 +	else
7260 +		printk(KERN_ERR "rmap_walk failed\n");
7261 +
7262 +	if (rc && page_was_mapped)
7263 +		remove_migration_ptes(page, page);
7264 +
7265 +	/* Drop an anon_vma reference if we took one */
7266 +	if (anon_vma)
7267 +		put_anon_vma(anon_vma);
7268 +
7269 +out_unlock:
7270 +	unlock_page(page);
7271 +out:
7272 +	return rc;
7273 +}
7274 +
7275  /*
7276   * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
7277   * around it.
7278 @@ -976,6 +1270,97 @@ out:
7279  }
7280  
7281  /*
7282 + * Obtain the lock on page, remove all ptes.
7283 + * 1) If r_pfn == INVALID_PFN, then copy the page to the newly allocated page in newpage.
7284 + * 2) If r_pfn != INVALID_PFN, then unmap and modify ptes.
7285 + */
7286 +#include <litmus/replicate_lib.h>
7287 +
7288 +static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
7289 +				   free_page_t put_new_page,
7290 +				   unsigned long private, struct page *page,
7291 +				   int force, enum migrate_mode mode)
7292 +{
7293 +	int rc = 0;
7294 +	int *result = NULL;
7295 +	struct page *newpage;
7296 +	struct shared_lib_page *lib_page;
7297 +	int master_exist_in_psl = 0, has_replica = 0, cpu = private/2;
7298 +	
7299 +	/* check if this page is in the PSL list */
7300 +	rcu_read_lock();
7301 +	list_for_each_entry(lib_page, &shared_lib_pages, list)
7302 +	{
7303 +		if (page_to_pfn(page) == lib_page->master_pfn) {
7304 +			master_exist_in_psl = 1;
7305 +			break;
7306 +		}
7307 +	}
7308 +	rcu_read_unlock();
7309 +
7310 +	if (lib_page->r_page[cpu] == NULL) {
7311 +		newpage = get_new_page(page, private, &result);
7312 +		if (!newpage)
7313 +			return -ENOMEM;
7314 +	} else {
7315 +		newpage = lib_page->r_page[cpu];
7316 +		has_replica = 1;
7317 +	}
7318 +
7319 +	if (page_count(page) == 1) {
7320 +		/* page was freed from under us. So we are done. */
7321 +		goto out;
7322 +	}
7323 +
7324 +	if (unlikely(PageTransHuge(page)))
7325 +		if (unlikely(split_huge_page(page)))
7326 +			goto out;
7327 +
7328 +	rc = __unmap_and_copy(page, newpage, force, mode, has_replica);
7329 +	
7330 +	if (has_replica == 0 && rc == MIGRATEPAGE_SUCCESS) {
7331 +		lib_page->r_page[cpu] = newpage;
7332 +		lib_page->r_pfn[cpu] = page_to_pfn(newpage);
7333 +	}
7334 +	
7335 +out:
7336 +	if (rc != -EAGAIN) {
7337 +		/*
7338 +		 * A page that has been migrated has all references
7339 +		 * removed and will be freed. A page that has not been
7340 +		 * migrated will have kept its references and be
7341 +		 * restored.
7342 +		 */
7343 +		list_del(&page->lru);
7344 +		dec_zone_page_state(page, NR_ISOLATED_ANON +
7345 +				page_is_file_cache(page));
7346 +		putback_lru_page(page);
7347 +	}
7348 +	
7349 +	/*
7350 +	 * If migration was not successful and there's a freeing callback, use
7351 +	 * it.  Otherwise, putback_lru_page() will drop the reference grabbed
7352 +	 * during isolation.
7353 +	 */
7354 +	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
7355 +		ClearPageSwapBacked(newpage);
7356 +		put_new_page(newpage, private);
7357 +	} else if (unlikely(__is_movable_balloon_page(newpage))) {
7358 +		/* drop our reference, page already in the balloon */
7359 +		put_page(newpage);
7360 +	} else
7361 +		putback_lru_page(newpage);
7362 +
7363 +	if (result) {
7364 +		if (rc)
7365 +			*result = rc;
7366 +		else
7367 +			*result = page_to_nid(newpage);
7368 +	}
7369 +	return rc;
7370 +}
7371 +
7372 +/*
7373   * Counterpart of unmap_and_move_page() for hugepage migration.
7374   *
7375   * This function doesn't wait the completion of hugepage I/O
7376 @@ -1159,6 +1544,85 @@ out:
7377  	return rc;
7378  }
7379  
7380 +/*
7381 + * replicate_pages - replicate the pages specified in a list
7382 + *
7383 + * @from:		The list of pages to be migrated.
7384 + * @get_new_page:	The function used to allocate free pages to be used
7385 + *			if there is no replicated page.
7386 + * @put_new_page:	The function used to free target pages if migration
7387 + *			fails, or NULL if no special handling is necessary.
7388 + * @private:		Private data to be passed on to get_new_page()
7389 + * @mode:		The migration mode that specifies the constraints for
7390 + *			page migration, if any.
7391 + * @reason:		The reason for page migration.
7392 + *
7393 + * The function returns after 10 attempts or if no pages are movable any more
7394 + * because the list has become empty or no retryable pages exist any more.
7395 + * The caller should call putback_lru_pages() to return pages to the LRU
7396 + * or free list only if ret != 0.
7397 + *
7398 + * Returns the number of pages that were not migrated, or an error code.
7399 + */
7400 +int replicate_pages(struct list_head *from, new_page_t get_new_page,
7401 +		free_page_t put_new_page, unsigned long private,
7402 +		enum migrate_mode mode, int reason)
7403 +{
7404 +	int retry = 1;
7405 +	int nr_failed = 0;
7406 +	int nr_succeeded = 0;
7407 +	int pass = 0;
7408 +	struct page *page;
7409 +	struct page *page2;
7410 +	int swapwrite = current->flags & PF_SWAPWRITE;
7411 +	int rc;
7412 +
7413 +	if (!swapwrite)
7414 +		current->flags |= PF_SWAPWRITE;
7415 +
7416 +	for(pass = 0; pass < 10 && retry; pass++) {
7417 +		retry = 0;
7418 +
7419 +		list_for_each_entry_safe(page, page2, from, lru) {
7420 +			cond_resched();
7421 +			
7422 +			rc = unmap_and_copy(get_new_page, put_new_page, private, page, pass > 2, mode);
7423 +
7424 +			switch(rc) {
7425 +			case -ENOMEM:
7426 +				goto out;
7427 +			case -EAGAIN:
7428 +				retry++;
7429 +				break;
7430 +			case MIGRATEPAGE_SUCCESS:
7431 +				nr_succeeded++;
7432 +				break;
7433 +			default:
7434 +				/*
7435 +				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
7436 +				 * unlike -EAGAIN case, the failed page is
7437 +				 * removed from migration page list and not
7438 +				 * retried in the next outer loop.
7439 +				 */
7440 +				nr_failed++;
7441 +				break;
7442 +			}
7443 +		}
7444 +	}
7445 +	rc = nr_failed + retry;
7446 +out:
7447 +	if (nr_succeeded)
7448 +		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
7449 +	if (nr_failed)
7450 +		count_vm_events(PGMIGRATE_FAIL, nr_failed);
7451 +	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
7452 +
7453 +	if (!swapwrite)
7454 +		current->flags &= ~PF_SWAPWRITE;
7455 +
7456 +	return rc;
7457 +}
7458 +
7459  #ifdef CONFIG_NUMA
7460  /*
7461   * Move a list of individual pages
7462 diff --git a/mm/rmap.c b/mm/rmap.c
7463 index 24dd3f9..0613210 100644
7464 --- a/mm/rmap.c
7465 +++ b/mm/rmap.c
7466 @@ -1317,6 +1317,170 @@ out_mlock:
7467  	return ret;
7468  }
7469  
7470 +/*
7471 + * @arg: enum ttu_flags will be passed to this argument
7472 + */
7473 +static int try_to_unmap_one_entry(struct page *page, struct vm_area_struct *vma,
7474 +		     unsigned long address, void *arg)
7475 +{
7476 +	struct mm_struct *mm = vma->vm_mm;
7477 +	pte_t *pte;
7478 +	pte_t pteval;
7479 +	spinlock_t *ptl;
7480 +	int ret = SWAP_AGAIN;
7481 +	enum ttu_flags flags = (enum ttu_flags)arg;
7482 +
7483 +	pte = page_check_address(page, mm, address, &ptl, 0);
7484 +	if (!pte)
7485 +		goto out;
7486 +
7487 +	/*
7488 +	 * If the page is mlock()d, we cannot swap it out.
7489 +	 * If it's recently referenced (perhaps page_referenced
7490 +	 * skipped over this mm) then we should reactivate it.
7491 +	 */
7492 +	if (!(flags & TTU_IGNORE_MLOCK)) {
7493 +		if (vma->vm_flags & VM_LOCKED)
7494 +			goto out_mlock;
7495 +
7496 +		if (flags & TTU_MUNLOCK)
7497 +			goto out_unmap;
7498 +	}
7499 +	if (!(flags & TTU_IGNORE_ACCESS)) {
7500 +		if (ptep_clear_flush_young_notify(vma, address, pte)) {
7501 +			ret = SWAP_FAIL;
7502 +			goto out_unmap;
7503 +		}
7504 +  	}
7505 +
7506 +	/* Nuke the page table entry. */
7507 +	flush_cache_page(vma, address, page_to_pfn(page));
7508 +	pteval = ptep_clear_flush(vma, address, pte);
7509 +
7510 +	/* Move the dirty bit to the physical page now the pte is gone. */
7511 +	if (pte_dirty(pteval))
7512 +		set_page_dirty(page);
7513 +
7514 +	/* Update high watermark before we lower rss */
7515 +	update_hiwater_rss(mm);
7516 +
7517 +	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
7518 +		if (!PageHuge(page)) {
7519 +			if (PageAnon(page))
7520 +				dec_mm_counter(mm, MM_ANONPAGES);
7521 +			else
7522 +				dec_mm_counter(mm, MM_FILEPAGES);
7523 +		}
7524 +		set_pte_at(mm, address, pte,
7525 +			   swp_entry_to_pte(make_hwpoison_entry(page)));
7526 +	} else if (pte_unused(pteval)) {
7527 +		/*
7528 +		 * The guest indicated that the page content is of no
7529 +		 * interest anymore. Simply discard the pte, vmscan
7530 +		 * will take care of the rest.
7531 +		 */
7532 +		if (PageAnon(page))
7533 +			dec_mm_counter(mm, MM_ANONPAGES);
7534 +		else
7535 +			dec_mm_counter(mm, MM_FILEPAGES);
7536 +	} else if (PageAnon(page)) {
7537 +		swp_entry_t entry = { .val = page_private(page) };
7538 +		pte_t swp_pte;
7539 +
7540 +		if (PageSwapCache(page)) {
7541 +			/*
7542 +			 * Store the swap location in the pte.
7543 +			 * See handle_pte_fault() ...
7544 +			 */
7545 +			if (swap_duplicate(entry) < 0) {
7546 +				set_pte_at(mm, address, pte, pteval);
7547 +				ret = SWAP_FAIL;
7548 +				goto out_unmap;
7549 +			}
7550 +			if (list_empty(&mm->mmlist)) {
7551 +				spin_lock(&mmlist_lock);
7552 +				if (list_empty(&mm->mmlist))
7553 +					list_add(&mm->mmlist, &init_mm.mmlist);
7554 +				spin_unlock(&mmlist_lock);
7555 +			}
7556 +			dec_mm_counter(mm, MM_ANONPAGES);
7557 +			inc_mm_counter(mm, MM_SWAPENTS);
7558 +		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
7559 +			/*
7560 +			 * Store the pfn of the page in a special migration
7561 +			 * pte. do_swap_page() will wait until the migration
7562 +			 * pte is removed and then restart fault handling.
7563 +			 */
7564 +			BUG_ON(!(flags & TTU_MIGRATION));
7565 +			entry = make_migration_entry(page, pte_write(pteval));
7566 +		}
7567 +		swp_pte = swp_entry_to_pte(entry);
7568 +		if (pte_soft_dirty(pteval))
7569 +			swp_pte = pte_swp_mksoft_dirty(swp_pte);
7570 +		set_pte_at(mm, address, pte, swp_pte);
7571 +	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
7572 +		   (flags & TTU_MIGRATION)) {
7573 +		/* Establish migration entry for a file page */
7574 +		swp_entry_t entry;
7575 +		entry = make_migration_entry(page, pte_write(pteval));
7576 +		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));	
7577 +	} else
7578 +		dec_mm_counter(mm, MM_FILEPAGES);
7579 +
7580 +	page_remove_rmap(page);
7581 +	page_cache_release(page);
7582 +
7583 +out_unmap:
7584 +	pte_unmap_unlock(pte, ptl);
7585 +	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK)) {
7586 +		mmu_notifier_invalidate_page(mm, address);
7587 +		ret = SWAP_SUCCESS;
7588 +	}
7589 +out:
7590 +	return ret;
7591 +
7592 +out_mlock:
7593 +	pte_unmap_unlock(pte, ptl);
7594 +
7595 +
7596 +	/*
7597 +	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
7598 +	 * unstable result and race. Plus, We can't wait here because
7599 +	 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
7600 +	 * if trylock failed, the page remain in evictable lru and later
7601 +	 * vmscan could retry to move the page to unevictable lru if the
7602 +	 * page is actually mlocked.
7603 +	 */
7604 +	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
7605 +		if (vma->vm_flags & VM_LOCKED) {
7606 +			mlock_vma_page(page);
7607 +			ret = SWAP_MLOCK;
7608 +		}
7609 +		up_read(&vma->vm_mm->mmap_sem);
7610 +	}
7611 +	return ret;
7612 +}
7613 +
7614 +int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
7615 +		     unsigned long address, void *arg)
7616 +{
7617 +	struct mm_struct *mm = vma->vm_mm;
7618 +	struct mm_struct *current_mm;
7619 +	
7620 +	rcu_read_lock();
7621 +	get_task_struct(current);
7622 +	rcu_read_unlock();
7623 +	current_mm = get_task_mm(current);
7624 +	put_task_struct(current);
7625 +	if (!current_mm)
7626 +		BUG();
7627 +	
7628 +	if (mm == current_mm) {
7629 +		return try_to_unmap_one_entry(page, vma, address, arg);
7630 +	}
7631 +	return SWAP_AGAIN;
7632 +}	
7633 +
7634  bool is_vma_temporary_stack(struct vm_area_struct *vma)
7635  {
7636  	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

Attached Files

  • [get | view] (2015-06-04 21:22:59, 244.8 KB) [[attachment:MC2-liblitmus-imx6-rtss15.patch]]
  • [get | view] (2016-05-12 14:35:37, 51.9 KB) [[attachment:MC2-liblitmus-rtss16.patch]]
  • [get | view] (2016-05-12 14:36:06, 190.4 KB) [[attachment:MC2-litmus-rt-rtss16.patch]]
  • [get | view] (2015-07-19 10:27:52, 1119.9 KB) [[attachment:MC2-litmut-rt-imx6-rtss15.patch]]
  • [get | view] (2014-05-27 20:46:19, 58.3 KB) [[attachment:MC2_liblitmus_ipdps15.patch]]
  • [get | view] (2014-05-27 20:45:43, 1044.3 KB) [[attachment:MC2_litmusrt_ipdps15.patch]]
  • [get | view] (2017-04-07 21:48:09, 6099.5 KB) [[attachment:buff_sharing.tar]]
  • [get | view] (2015-01-08 14:20:07, 61.0 KB) [[attachment:feather-trace-patch-against-sched-deadline-v8.patch]]
  • [get | view] (2014-04-01 23:10:10, 38.9 KB) [[attachment:gedf-mp-rtas14.patch]]
  • [get | view] (2012-03-02 20:13:59, 1.9 KB) [[attachment:gpu-klmirqd-liblitmus-rt-ecrts12.patch]]
  • [get | view] (2012-03-02 20:14:25, 389.8 KB) [[attachment:gpu-klmirqd-litmus-rt-ecrts12.patch]]
  • [get | view] (2012-05-26 21:41:34, 418.0 KB) [[attachment:gpusync-rtss12.patch]]
  • [get | view] (2012-05-26 21:42:20, 8.6 KB) [[attachment:gpusync_liblitmus-rtss12.patch]]
  • [get | view] (2013-05-21 15:32:08, 208.6 KB) [[attachment:gpusync_rtss13_liblitmus.patch]]
  • [get | view] (2013-05-21 15:31:32, 779.5 KB) [[attachment:gpusync_rtss13_litmus.patch]]
  • [get | view] (2012-05-26 21:42:41, 71.4 KB) [[attachment:klt_tracker_v1.0.litmus.tgz]]
  • [get | view] (2016-10-13 21:14:05, 19.6 KB) [[attachment:liblitmus-rtas17.patch]]
  • [get | view] (2017-05-01 20:46:22, 90.0 KB) [[attachment:liblitmus-rtns17.patch]]
  • [get | view] (2018-12-11 01:38:53, 49.1 KB) [[attachment:liblitmus-semi-part-with-edfos.patch]]
  • [get | view] (2017-10-09 19:16:09, 304.0 KB) [[attachment:litmus-rt-os-isolation.patch]]
  • [get | view] (2016-10-13 21:13:27, 207.6 KB) [[attachment:litmus-rt-rtas17.patch]]
  • [get | view] (2017-05-01 20:46:40, 207.6 KB) [[attachment:litmus-rt-rtns17.patch]]
  • [get | view] (2018-12-11 01:39:04, 100.5 KB) [[attachment:litmus-rt-semi-part-with-edfos.patch]]
  • [get | view] (2018-06-26 04:31:48, 7.0 KB) [[attachment:mc2_liblitmus_2015.1-rtns18.patch]]
  • [get | view] (2018-06-26 04:31:33, 292.7 KB) [[attachment:mc2_litmus-rt_2015.1-rtns18.patch]]
  • [get | view] (2017-05-01 20:45:10, 2596.9 KB) [[attachment:mcp_study.zip]]
  • [get | view] (2013-07-13 14:11:53, 58.0 KB) [[attachment:omip-ecrts13.patch]]
  • [get | view] (2014-02-19 21:48:33, 17.2 KB) [[attachment:pgmrt-liblitmus-ecrts14.patch]]
  • [get | view] (2014-02-19 21:47:57, 87.8 KB) [[attachment:pgmrt-litmusrt-ecrts14.patch]]
  • [get | view] (2015-01-08 14:22:32, 61.0 KB) [[attachment:sched-deadline-v8-feather-trace-rtas14.patch]]
  • [get | view] (2018-06-26 04:32:13, 2545.1 KB) [[attachment:sched_study_rtns2018.tar.gz]]
  • [get | view] (2017-04-07 21:53:39, 5969.5 KB) [[attachment:seminal.tar]]
  • [get | view] (2017-04-07 21:51:13, 6064.0 KB) [[attachment:shared_libraries.tar]]
  • [get | view] (2013-07-13 13:58:25, 42.7 KB) [[attachment:tracing-and-dflp-rtas13.patch]]