Attachment 'MC2-litmus-rt-rtss16.patch'

   1 diff --git arch/arm/boot/compressed/Makefile arch/arm/boot/compressed/Makefile
   2 index 6e1fb2b..e2284fe 100644
   3 --- arch/arm/boot/compressed/Makefile
   4 +++ arch/arm/boot/compressed/Makefile
   5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
   6  ORIG_CFLAGS := $(KBUILD_CFLAGS)
   7  KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
   8  endif
   9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
  10  
  11  ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
  12  asflags-y := -DZIMAGE
  13 diff --git arch/arm/include/asm/unistd.h arch/arm/include/asm/unistd.h
  14 index 7197bbe..5291b70 100644
  15 --- arch/arm/include/asm/unistd.h
  16 +++ arch/arm/include/asm/unistd.h
  17 @@ -19,7 +19,7 @@
  18   * This may need to be greater than __NR_last_syscall+1 in order to
  19   * account for the padding in the syscall table
  20   */
  21 -#define __NR_syscalls  (388 + NR_litmus_syscalls + 3)
  22 +#define __NR_syscalls  (388 + NR_litmus_syscalls + 0)
  23  
  24  
  25  /*
  26 diff --git arch/arm/kernel/calls.S arch/arm/kernel/calls.S
  27 index 3e00296..55dc863 100644
  28 --- arch/arm/kernel/calls.S
  29 +++ arch/arm/kernel/calls.S
  30 @@ -410,7 +410,13 @@
  31  		CALL(sys_release_ts)
  32  		CALL(sys_null_call)
  33  /* 400 */	CALL(sys_get_current_budget)
  34 -
  35 +		CALL(sys_reservation_create)
  36 +		CALL(sys_reservation_destroy)
  37 +		CALL(sys_set_mc2_task_param)
  38 +		CALL(sys_set_page_color)
  39 +/* 405 */	CALL(sys_test_call)
  40 +		CALL(sys_run_test)
  41 +		CALL(sys_lock_buffer)
  42  
  43  #ifndef syscalls_counted
  44  .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
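
A quick check of the syscall-number arithmetic behind the two ARM hunks above: with NR_litmus_syscalls raised from 13 to 20 (see include/litmus/unistd_32.h later in this patch), __NR_syscalls becomes 388 + 20 + 0 = 408. Since 408 is already a multiple of 4, the padding computed here, ((408 + 3) & ~3) - 408, evaluates to 0; the old "+ 3" slack in unistd.h, which rounded 388 + 13 = 401 up to 404, is no longer needed, and the seven new CALL() entries occupy slots 401-407 directly after sys_get_current_budget at slot 400.
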
  45 diff --git arch/arm/mm/cache-l2x0.c arch/arm/mm/cache-l2x0.c
  46 index e309c8f..71c969a 100644
  47 --- arch/arm/mm/cache-l2x0.c
  48 +++ arch/arm/mm/cache-l2x0.c
  49 @@ -33,6 +33,8 @@
  50  #include "cache-tauros3.h"
  51  #include "cache-aurora-l2.h"
  52  
  53 +#include <litmus/cache_proc.h>
  54 +
  55  struct l2c_init_data {
  56  	const char *type;
  57  	unsigned way_size_0;
  58 @@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  59  
  60  	if (n) {
  61  		unsigned i;
  62 -
  63  		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  64  		for (i = 0; i < n; i++)
  65  			pr_cont(" %s", errata[i]);
  66 @@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
  67  	},
  68  };
  69  
  70 +void l2c310_flush_all(void)
  71 +{
  72 +	l2c210_flush_all();
  73 +};
  74 +
  75  static int __init __l2c_init(const struct l2c_init_data *data,
  76  			     u32 aux_val, u32 aux_mask, u32 cache_id)
  77  {
  78 @@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
  79  	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
  80  		data->type, cache_id, aux);
  81  
  82 +	litmus_setup_lockdown(l2x0_base, cache_id);
  83 +	
  84  	return 0;
  85  }
  86  
  87 diff --git arch/x86/syscalls/syscall_32.tbl arch/x86/syscalls/syscall_32.tbl
  88 index a0ec8ab..b303a9b 100644
  89 --- arch/x86/syscalls/syscall_32.tbl
  90 +++ arch/x86/syscalls/syscall_32.tbl
  91 @@ -378,3 +378,10 @@
  92  369	i386	release_ts		sys_release_ts
  93  370	i386	null_call		sys_null_call
  94  371	i386	get_current_budget	sys_get_current_budget
  95 +372	i386	reservation_create	sys_reservation_create
  96 +373	i386	reservation_destroy	sys_reservation_destroy
  97 +374	i386	set_mc2_task_param	sys_set_mc2_task_param
  98 +375	i386	set_page_color		sys_set_page_color
  99 +376	i386	test_call		sys_test_call
 100 +377	i386	run_test		sys_run_test
 101 +378	i386	lock_buffer		sys_lock_buffer
 102 diff --git arch/x86/syscalls/syscall_64.tbl arch/x86/syscalls/syscall_64.tbl
 103 index 04f5b74..5f24a80 100644
 104 --- arch/x86/syscalls/syscall_64.tbl
 105 +++ arch/x86/syscalls/syscall_64.tbl
 106 @@ -343,7 +343,13 @@
 107  361	common	release_ts		sys_release_ts
 108  362	common	null_call		sys_null_call
 109  363	common	get_current_budget	sys_get_current_budget
 110 -
 111 +364	common	reservation_create	sys_reservation_create
 112 +365	common	reservation_destroy	sys_reservation_destroy
 113 +366	common	set_mc2_task_param	sys_set_mc2_task_param
 114 +367	common	set_page_color		sys_set_page_color
 115 +368	common	test_call		sys_test_call
 116 +369	common	run_test		sys_run_test
 117 +370	common	lock_buffer		sys_lock_buffer
 118  
 119  #
 120  # x32-specific system call numbers start at 512 to avoid cache impact
 121 diff --git include/litmus/cache_proc.h include/litmus/cache_proc.h
 122 new file mode 100644
 123 index 0000000..e9440de
 124 --- /dev/null
 125 +++ include/litmus/cache_proc.h
 126 @@ -0,0 +1,17 @@
 127 +#ifndef LITMUS_CACHE_PROC_H
 128 +#define LITMUS_CACHE_PROC_H
 129 +
 130 +#ifdef __KERNEL__
 131 +
 132 +void litmus_setup_lockdown(void __iomem*, u32);
 133 +void enter_irq_mode(void);
 134 +void exit_irq_mode(void);
 135 +void flush_cache(int all);
 136 +void lock_cache(int cpu, u32 val);
 137 +
 138 +extern struct page *new_alloc_page_color(unsigned long color);
 139 +
 140 +#endif
 141 +
 142 +#endif
 143 +
 144 diff --git include/litmus/litmus.h include/litmus/litmus.h
 145 index a6eb534..441210c 100644
 146 --- include/litmus/litmus.h
 147 +++ include/litmus/litmus.h
 148 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
 149  	((current)->state == TASK_RUNNING || 	\
 150  	 preempt_count() & PREEMPT_ACTIVE)
 151  
 152 +#define is_running(t) 			\
 153 +	((t)->state == TASK_RUNNING || 	\
 154 +	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
 155 +
 156 +#define is_blocked(t)       \
 157 +	(!is_running(t))
 158 +
 159  #define is_released(t, now)	\
 160  	(lt_before_eq(get_release(t), now))
 161  #define is_tardy(t, now)    \
 162 diff --git include/litmus/mc2_common.h include/litmus/mc2_common.h
 163 new file mode 100644
 164 index 0000000..e3c0af2
 165 --- /dev/null
 166 +++ include/litmus/mc2_common.h
 167 @@ -0,0 +1,31 @@
 168 +/*
 169 + * MC^2 common data structures
 170 + */
 171 + 
 172 +#ifndef __UNC_MC2_COMMON_H__
 173 +#define __UNC_MC2_COMMON_H__
 174 +
 175 +enum crit_level {
 176 +	CRIT_LEVEL_A = 0,
 177 +	CRIT_LEVEL_B = 1,
 178 +	CRIT_LEVEL_C = 2,
 179 +	NUM_CRIT_LEVELS = 3,
 180 +};
 181 +
 182 +struct mc2_task {
 183 +	enum crit_level crit;
 184 +	unsigned int res_id;
 185 +};
 186 +
 187 +#ifdef __KERNEL__
 188 +
 189 +#include <litmus/reservation.h>
 190 +
 191 +#define tsk_mc2_data(t)		(tsk_rt(t)->mc2_data)
 192 +
 193 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 194 +							struct reservation *res);
 195 +	
 196 +#endif /* __KERNEL__ */
 197 +
 198 +#endif
 199 \ No newline at end of file
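
As a userspace-side illustration of the mc2_task parameters introduced above (a sketch only: the struct fields come from this header, but the exact calling convention of the new set_mc2_task_param syscall, number 366 on x86-64 / 374 on i386 per the tables above, is not shown in this patch):

	#include <litmus/mc2_common.h>

	/* Hypothetical sketch: tag the calling task as a Level-B task attached
	 * to reservation 42 before it transitions into real-time mode. */
	struct mc2_task param = {
		.crit   = CRIT_LEVEL_B,
		.res_id = 42,
	};
	/* handed to the new set_mc2_task_param syscall (calling convention
	 * assumed; not part of this header) */
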
 200 diff --git include/litmus/polling_reservations.h include/litmus/polling_reservations.h
 201 new file mode 100644
 202 index 0000000..66c9b1e
 203 --- /dev/null
 204 +++ include/litmus/polling_reservations.h
 205 @@ -0,0 +1,36 @@
 206 +#ifndef LITMUS_POLLING_RESERVATIONS_H
 207 +#define LITMUS_POLLING_RESERVATIONS_H
 208 +
 209 +#include <litmus/reservation.h>
 210 +
 211 +struct polling_reservation {
 212 +	/* extend basic reservation */
 213 +	struct reservation res;
 214 +
 215 +	lt_t max_budget;
 216 +	lt_t period;
 217 +	lt_t deadline;
 218 +	lt_t offset;
 219 +};
 220 +
 221 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
 222 +	int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
 223 +
 224 +struct table_driven_reservation {
 225 +	/* extend basic reservation */
 226 +	struct reservation res;
 227 +
 228 +	lt_t major_cycle;
 229 +	unsigned int next_interval;
 230 +	unsigned int num_intervals;
 231 +	struct lt_interval *intervals;
 232 +
 233 +	/* info about current scheduling slot */
 234 +	struct lt_interval cur_interval;
 235 +	lt_t major_cycle_start;
 236 +};
 237 +
 238 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
 239 +	lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
 240 +
 241 +#endif
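
The header above only declares the constructors; as a rough sketch of how a periodic polling reservation might be set up (hypothetical values; the sup_* environment and struct reservation fields come from include/litmus/reservation.h, added next, and lt_t values are interpreted as nanoseconds as elsewhere in LITMUS^RT):

	#include <litmus/polling_reservations.h>

	/* Hypothetical sketch: a periodic, EDF-prioritized polling reservation
	 * with a 10 ms budget every 100 ms, registered with a per-CPU
	 * uniprocessor (SUP) environment. */
	static struct polling_reservation example_pres;

	static void example_setup(struct sup_reservation_environment *sup_env)
	{
		polling_reservation_init(&example_pres,
			1,            /* use_edf_prio */
			1,            /* use_periodic_polling */
			10000000ULL,  /* budget: 10 ms */
			100000000ULL, /* period: 100 ms */
			100000000ULL, /* implicit relative deadline */
			0);           /* offset */
		example_pres.res.id = 42; /* hypothetical reservation ID */
		sup_add_new_reservation(sup_env, &example_pres.res);
	}
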
 242 diff --git include/litmus/reservation.h include/litmus/reservation.h
 243 new file mode 100644
 244 index 0000000..7e022b3
 245 --- /dev/null
 246 +++ include/litmus/reservation.h
 247 @@ -0,0 +1,256 @@
 248 +#ifndef LITMUS_RESERVATION_H
 249 +#define LITMUS_RESERVATION_H
 250 +
 251 +#include <linux/list.h>
 252 +#include <linux/hrtimer.h>
 253 +
 254 +struct reservation_client;
 255 +struct reservation_environment;
 256 +struct reservation;
 257 +
 258 +typedef enum {
 259 +	/* reservation has no clients, is not consuming budget */
 260 +	RESERVATION_INACTIVE = 0,
 261 +
 262 +	/* reservation has clients, consumes budget when scheduled */
 263 +	RESERVATION_ACTIVE,
 264 +
 265 +	/* reservation has no clients, but may be consuming budget */
 266 +	RESERVATION_ACTIVE_IDLE,
 267 +
 268 +	/* Reservation has no budget and waits for
 269 +	 * replenishment. May or may not have clients. */
 270 +	RESERVATION_DEPLETED,
 271 +} reservation_state_t;
 272 +
 273 +
 274 +/* ************************************************************************** */
 275 +
 276 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
 277 + * to schedule right now and background work can be scheduled. */
 278 +typedef struct task_struct * (*dispatch_t)  (
 279 +	struct reservation_client *client
 280 +);
 281 +
 282 +/* Something that can be managed in a reservation and that can yield
 283 + * a process for dispatching. Contains a pointer to the reservation
 284 + * to which it "belongs". */
 285 +struct reservation_client {
 286 +	struct list_head list;
 287 +	struct reservation* reservation;
 288 +	dispatch_t dispatch;
 289 +};
 290 +
 291 +
 292 +/* ************************************************************************** */
 293 +
 294 +/* Called by reservations to request state change. */
 295 +typedef void (*reservation_change_state_t)  (
 296 +	struct reservation_environment* env,
 297 +	struct reservation *res,
 298 +	reservation_state_t new_state
 299 +);
 300 +
 301 +/* The framework within which reservations operate. */
 302 +struct reservation_environment {
 303 +	lt_t time_zero;
 304 +	lt_t current_time;
 305 +
 306 +	/* services invoked by reservations */
 307 +	reservation_change_state_t change_state;
 308 +};
 309 +
 310 +
 311 +/* ************************************************************************** */
 312 +
 313 +/* A new client is added or an existing client resumes. */
 314 +typedef void (*client_arrives_t)  (
 315 +	struct reservation *reservation,
 316 +	struct reservation_client *client
 317 +);
 318 +
 319 +/* A client suspends or terminates. */
 320 +typedef void (*client_departs_t)  (
 321 +	struct reservation *reservation,
 322 +	struct reservation_client *client,
 323 +	int did_signal_job_completion
 324 +);
 325 +
 326 +/* A previously requested replenishment has occurred. */
 327 +typedef void (*on_replenishment_timer_t)  (
 328 +	struct reservation *reservation
 329 +);
 330 +
 331 +/* Update the reservation's budget to reflect execution or idling. */
 332 +typedef void (*drain_budget_t) (
 333 +	struct reservation *reservation,
 334 +	lt_t how_much
 335 +);
 336 +
 337 +/* Select a ready task from one of the clients for scheduling. */
 338 +typedef struct task_struct* (*dispatch_client_t)  (
 339 +	struct reservation *reservation,
 340 +	lt_t *time_slice /* May be used to force rescheduling after
 341 +	                    some amount of time. 0 => no limit */
 342 +);
 343 +
 344 +
 345 +struct reservation_ops {
 346 +	dispatch_client_t dispatch_client;
 347 +
 348 +	client_arrives_t client_arrives;
 349 +	client_departs_t client_departs;
 350 +
 351 +	on_replenishment_timer_t replenish;
 352 +	drain_budget_t drain_budget;
 353 +};
 354 +
 355 +struct reservation {
 356 +	/* used to queue in environment */
 357 +	struct list_head list;
 358 +
 359 +	reservation_state_t state;
 360 +	unsigned int id;
 361 +
 362 +	/* exact meaning defined by impl. */
 363 +	lt_t priority;
 364 +	lt_t cur_budget;
 365 +	lt_t next_replenishment;
 366 +
 367 +	/* budget stats */
 368 +	lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
 369 +	lt_t budget_consumed_total;
 370 +
 371 +	/* interaction with framework */
 372 +	struct reservation_environment *env;
 373 +	struct reservation_ops *ops;
 374 +
 375 +	struct list_head clients;
 376 +	
 377 +	/* for global env. */
 378 +	int scheduled_on;
 379 +	int event_added;
 380 +	/* blocked by a ghost job; do not charge budget when ACTIVE */
 381 +	int blocked_by_ghost;
 382 +	/* ghost-job flag; if clear, do not charge budget when ACTIVE_IDLE */
 383 +	int is_ghost;
 384 +};
 385 +
 386 +void reservation_init(struct reservation *res);
 387 +
 388 +/* Default implementations */
 389 +
 390 +/* simply select the first client in the list, set *for_at_most to zero */
 391 +struct task_struct* default_dispatch_client(
 392 +	struct reservation *res,
 393 +	lt_t *for_at_most
 394 +);
 395 +
 396 +/* "connector" reservation client to hook up tasks with reservations */
 397 +struct task_client {
 398 +	struct reservation_client client;
 399 +	struct task_struct *task;
 400 +};
 401 +
 402 +void task_client_init(struct task_client *tc, struct task_struct *task,
 403 +	struct reservation *reservation);
 404 +
 405 +#define SUP_RESCHEDULE_NOW (0)
 406 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
 407 +
 408 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
 409 + * environment.
 410 + */
 411 +struct sup_reservation_environment {
 412 +	struct reservation_environment env;
 413 +
 414 +	/* ordered by priority */
 415 +	struct list_head active_reservations;
 416 +
 417 +	/* ordered by next_replenishment */
 418 +	struct list_head depleted_reservations;
 419 +
 420 +	/* unordered */
 421 +	struct list_head inactive_reservations;
 422 +
 423 +	/* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
 424 +	 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
 425 +	 * any other value means program a timer for the given time
 426 +	 */
 427 +	lt_t next_scheduler_update;
 428 +	/* set to true if a call to sup_dispatch() is imminent */
 429 +	bool will_schedule;
 430 +};
 431 +
 432 +/* Contract:
 433 + *  - before calling into sup_ code, or any reservation methods,
 434 + *    update the time with sup_update_time(); and
 435 + *  - after calling into sup_ code, or any reservation methods,
 436 + *    check next_scheduler_update and program timer or trigger
 437 + *    scheduler invocation accordingly.
 438 + */
 439 +
 440 +void sup_init(struct sup_reservation_environment* sup_env);
 441 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
 442 +	struct reservation* new_res);
 443 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
 444 +	lt_t timeout);
 445 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
 446 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
 447 +
 448 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
 449 +	unsigned int id);
 450 +	
 451 +/* A global multiprocessor reservation environment. */
 452 +
 453 +typedef enum {
 454 +	EVENT_REPLENISH = 0,
 455 +	EVENT_DRAIN,
 456 +	EVENT_OTHERS,
 457 +} event_type_t;
 458 +
 459 +
 460 +struct next_timer_event {
 461 +	lt_t next_update;
 462 +	int timer_armed_on;
 463 +	unsigned int id;
 464 +	event_type_t type;
 465 +	struct list_head list;
 466 +};
 467 +
 468 +struct gmp_reservation_environment {
 469 +	raw_spinlock_t lock;
 470 +	struct reservation_environment env;
 471 +
 472 +	/* ordered by priority */
 473 +	struct list_head active_reservations;
 474 +
 475 +	/* ordered by next_replenishment */
 476 +	struct list_head depleted_reservations;
 477 +
 478 +	/* unordered */
 479 +	struct list_head inactive_reservations;
 480 +
 481 +	/* timer event ordered by next_update */
 482 +	struct list_head next_events;
 483 +	
 484 +	/* (schedule_now == true) means call gmp_dispatch() now */
 485 +	int schedule_now;
 486 +	/* set to true if a call to gmp_dispatch() is imminent */
 487 +	bool will_schedule;
 488 +};
 489 +
 490 +void gmp_init(struct gmp_reservation_environment* gmp_env);
 491 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
 492 +	struct reservation* new_res);
 493 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
 494 +	lt_t timeout, unsigned int id, event_type_t type);
 495 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
 496 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
 497 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
 498 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
 499 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
 500 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
 501 +	unsigned int id);
 502 +
 503 +#endif
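
To make the usage contract spelled out above concrete, a scheduler hot path built on the SUP environment would look roughly like the sketch below; struct example_cpu_state and program_budget_timer() are hypothetical stand-ins, while litmus_clock(), the sup_* calls, next_scheduler_update, and SUP_NO_SCHEDULER_UPDATE come from the headers in this patch:

	#include <litmus/litmus.h>
	#include <litmus/reservation.h>

	/* Hypothetical per-CPU state embedding a SUP environment. */
	struct example_cpu_state {
		struct sup_reservation_environment sup_env;
	};

	static struct task_struct* example_schedule(struct example_cpu_state *state)
	{
		struct task_struct *next;
		lt_t now = litmus_clock();

		/* 1) advance the environment's notion of time before calling in */
		sup_update_time(&state->sup_env, now);

		/* 2) let the highest-priority active reservation pick a client task */
		next = sup_dispatch(&state->sup_env);

		/* 3) after calling in, honor next_scheduler_update: program a timer
		 *    for the requested time (program_budget_timer() is a hypothetical
		 *    helper), or do nothing for SUP_NO_SCHEDULER_UPDATE */
		if (state->sup_env.next_scheduler_update != SUP_NO_SCHEDULER_UPDATE)
			program_budget_timer(state, state->sup_env.next_scheduler_update);

		return next;
	}
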
 504 diff --git include/litmus/rt_param.h include/litmus/rt_param.h
 505 index 7b9a909..56de045 100644
 506 --- include/litmus/rt_param.h
 507 +++ include/litmus/rt_param.h
 508 @@ -51,6 +51,16 @@ typedef enum {
 509  	TASK_EARLY
 510  } release_policy_t;
 511  
 512 +#ifdef CONFIG_PGMRT_SUPPORT
 513 +typedef enum {
 514 +    PGM_NOT_A_NODE,
 515 +    PGM_SRC,
 516 +    PGM_SINK,
 517 +    PGM_SRC_SINK,
 518 +    PGM_INTERNAL
 519 +} pgm_node_type_t;
 520 +#endif
 521 +
 522  /* We use the common priority interpretation "lower index == higher priority",
 523   * which is commonly used in fixed-priority schedulability analysis papers.
 524   * So, a numerically lower priority value implies higher scheduling priority,
 525 @@ -62,6 +72,7 @@ typedef enum {
 526  #define LITMUS_MAX_PRIORITY     512
 527  #define LITMUS_HIGHEST_PRIORITY   1
 528  #define LITMUS_LOWEST_PRIORITY    (LITMUS_MAX_PRIORITY - 1)
 529 +#define LITMUS_NO_PRIORITY		UINT_MAX
 530  
 531  /* Provide generic comparison macros for userspace,
 532   * in case that we change this later. */
 533 @@ -71,6 +82,46 @@ typedef enum {
 534  	((p) >= LITMUS_HIGHEST_PRIORITY &&	\
 535  	 (p) <= LITMUS_LOWEST_PRIORITY)
 536  
 537 +/* reservation support */
 538 +
 539 +typedef enum {
 540 +	PERIODIC_POLLING,
 541 +	SPORADIC_POLLING,
 542 +	TABLE_DRIVEN,
 543 +} reservation_type_t;
 544 +
 545 +struct lt_interval {
 546 +	lt_t start;
 547 +	lt_t end;
 548 +};
 549 +
 550 +#ifndef __KERNEL__
 551 +#define __user
 552 +#endif
 553 +
 554 +struct reservation_config {
 555 +	unsigned int id;
 556 +	lt_t priority;
 557 +	int  cpu;
 558 +
 559 +	union {
 560 +		struct {
 561 +			lt_t period;
 562 +			lt_t budget;
 563 +			lt_t relative_deadline;
 564 +			lt_t offset;
 565 +		} polling_params;
 566 +
 567 +		struct {
 568 +			lt_t major_cycle_length;
 569 +			unsigned int num_intervals;
 570 +			struct lt_interval __user *intervals;
 571 +		} table_driven_params;
 572 +	};
 573 +};
 574 +
 575 +/* regular sporadic task support */
 576 +
 577  struct rt_task {
 578  	lt_t 		exec_cost;
 579  	lt_t 		period;
 580 @@ -81,6 +132,10 @@ struct rt_task {
 581  	task_class_t	cls;
 582  	budget_policy_t  budget_policy;  /* ignored by pfair */
 583  	release_policy_t release_policy;
 584 +#ifdef CONFIG_PGMRT_SUPPORT
 585 +	pgm_node_type_t	pgm_type;
 586 +	lt_t			pgm_expected_etoe;
 587 +#endif
 588  };
 589  
 590  union np_flag {
 591 @@ -121,6 +176,13 @@ struct control_page {
 592  	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
 593  				     * started. */
 594  
 595 +#ifdef CONFIG_PGMRT_SUPPORT
 596 +    /* Flags from userspace signifying PGM wait states. */
 597 +    volatile uint32_t   pgm_waiting;    /* waiting for tokens */
 598 +    volatile uint32_t   pgm_sending;    /* sending tokens */
 599 +    volatile uint32_t   pgm_satisfied;  /* done waiting/sending */
 600 +#endif
 601 +
 602  	/* to be extended */
 603  };
 604  
 605 @@ -165,6 +227,7 @@ struct rt_job {
 606  };
 607  
 608  struct pfair_param;
 609 +struct mc2_task;
 610  
 611  /*	RT task parameters for scheduling extensions
 612   *	These parameters are inherited during clone and therefore must
 613 @@ -246,7 +309,10 @@ struct rt_param {
 614  	volatile int		linked_on;
 615  
 616  	/* PFAIR/PD^2 state. Allocated on demand. */
 617 -	struct pfair_param*	pfair;
 618 +	union {
 619 +		void *plugin_state;
 620 +		struct pfair_param *pfair;
 621 +	};
 622  
 623  	/* Fields saved before BE->RT transition.
 624  	 */
 625 @@ -275,6 +341,10 @@ struct rt_param {
 626  
 627  	/* Pointer to the page shared between userspace and kernel. */
 628  	struct control_page * ctrl_page;
 629 +
 630 +	/* Mixed-criticality specific data */
 631 +	struct mc2_task* mc2_data;
 632 +	unsigned long addr_ctrl_page;
 633  };
 634  
 635  #endif
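
For reference, the new reservation_config above is meant to be filled in by userspace and handed to the reservation_create syscall added earlier in this patch; a minimal sketch for a periodic polling reservation follows (values arbitrary; the pairing of reservation_type and config is an assumption based on the reservation_create_t signature added to sched_plugin.h below):

	#include <litmus/rt_param.h>

	/* Hypothetical userspace sketch: request a polling reservation with
	 * id 42 on CPU 1, granting a 10 ms budget every 100 ms. */
	struct reservation_config cfg = {
		.id       = 42,
		.priority = LITMUS_NO_PRIORITY, /* no fixed priority (UINT_MAX, defined above) */
		.cpu      = 1,
		.polling_params = {
			.budget            =  10000000ULL, /* 10 ms  */
			.period            = 100000000ULL, /* 100 ms */
			.relative_deadline = 100000000ULL,
			.offset            = 0,
		},
	};
	/* assumed to be passed as reservation_create(PERIODIC_POLLING, &cfg),
	 * i.e., the new syscall 364 on x86-64 / 372 on i386 */
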
 636 diff --git include/litmus/sched_plugin.h include/litmus/sched_plugin.h
 637 index f36bb38..4c8aaa6 100644
 638 --- include/litmus/sched_plugin.h
 639 +++ include/litmus/sched_plugin.h
 640 @@ -83,6 +83,10 @@ typedef void (*synchronous_release_at_t)(lt_t time_zero);
 641   * reservation-specific values. */
 642  typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
 643  
 644 +/* Reservation creation/removal backends. The meanings of reservation_type and
 645 + * reservation_id are entirely plugin-specific. */
 646 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
 647 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
 648  
 649  /************************ misc routines ***********************/
 650  
 651 @@ -118,6 +122,10 @@ struct sched_plugin {
 652  
 653  	current_budget_t	current_budget;
 654  
 655 +	/* Reservation support */
 656 +	reservation_create_t	reservation_create;
 657 +	reservation_destroy_t	reservation_destroy;
 658 +
 659  #ifdef CONFIG_LITMUS_LOCKING
 660  	/*	locking protocols	*/
 661  	allocate_lock_t		allocate_lock;
 662 diff --git include/litmus/trace.h include/litmus/trace.h
 663 index 6017872..24ca412 100644
 664 --- include/litmus/trace.h
 665 +++ include/litmus/trace.h
 666 @@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 667  #define TS_TICK_START(t)		CPU_TTIMESTAMP(110, t)
 668  #define TS_TICK_END(t) 			CPU_TTIMESTAMP(111, t)
 669  
 670 +#define TS_RELEASE_C_START		CPU_DTIMESTAMP(108, TSK_RT)
 671 +#define TS_RELEASE_C_END		CPU_DTIMESTAMP(109, TSK_RT)
 672 +
 673  #define TS_QUANTUM_BOUNDARY_START	CPU_TIMESTAMP_CUR(112)
 674  #define TS_QUANTUM_BOUNDARY_END		CPU_TIMESTAMP_CUR(113)
 675  
 676 @@ -137,6 +140,17 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 677  #define TS_SEND_RESCHED_START(c)	MSG_TIMESTAMP_SENT(190, c)
 678  #define TS_SEND_RESCHED_END		MSG_TIMESTAMP_RECEIVED(191)
 679  
 680 -#define TS_RELEASE_LATENCY(when)	CPU_LTIMESTAMP(208, &(when))
 681 +#define TS_ISR_START			CPU_TIMESTAMP_CUR(192)
 682 +#define TS_ISR_END				CPU_TIMESTAMP_CUR(193)
 683 +
 684 +#define TS_RELEASE_LATENCY(when)    CPU_LTIMESTAMP(208, &(when))
 685 +#define TS_RELEASE_LATENCY_A(when)  CPU_LTIMESTAMP(209, &(when))
 686 +#define TS_RELEASE_LATENCY_B(when)  CPU_LTIMESTAMP(210, &(when))
 687 +#define TS_RELEASE_LATENCY_C(when)  CPU_LTIMESTAMP(211, &(when))
 688 +
 689 +#define TS_SCHED_A_START			CPU_DTIMESTAMP(212, TSK_UNKNOWN)
 690 +#define TS_SCHED_A_END(t)			CPU_TTIMESTAMP(213, t)
 691 +#define TS_SCHED_C_START			CPU_DTIMESTAMP(214, TSK_UNKNOWN)
 692 +#define TS_SCHED_C_END(t)			CPU_TTIMESTAMP(215, t)
 693  
 694  #endif /* !_SYS_TRACE_H_ */
 695 diff --git include/litmus/unistd_32.h include/litmus/unistd_32.h
 696 index 570b1f5..86bbbb8d 100644
 697 --- include/litmus/unistd_32.h
 698 +++ include/litmus/unistd_32.h
 699 @@ -18,5 +18,12 @@
 700  #define __NR_release_ts		__LSC(10)
 701  #define __NR_null_call		__LSC(11)
 702  #define __NR_get_current_budget __LSC(12)
 703 +#define __NR_reservation_create	__LSC(13)
 704 +#define __NR_reservation_destroy	__LSC(14)
 705 +#define __NR_set_mc2_task_param	__LSC(15)
 706 +#define __NR_set_page_color		__LSC(16)
 707 +#define __NR_test_call		__LSC(17)
 708 +#define __NR_run_test		__LSC(18)
 709 +#define __NR_lock_buffer	__LSC(19)
 710  
 711 -#define NR_litmus_syscalls	13
 712 +#define NR_litmus_syscalls	20
 713 diff --git include/litmus/unistd_64.h include/litmus/unistd_64.h
 714 index 3096bf2..4b96e7c 100644
 715 --- include/litmus/unistd_64.h
 716 +++ include/litmus/unistd_64.h
 717 @@ -30,6 +30,21 @@ __SYSCALL(__NR_release_ts, sys_release_ts)
 718  #define __NR_null_call				__LSC(11)
 719  __SYSCALL(__NR_null_call, sys_null_call)
 720  #define __NR_get_current_budget			__LSC(12)
 721 -__SYSCALL(____NR_get_current_budget, sys_get_current_budget)
 722 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
 723 +#define __NR_reservation_create		__LSC(13)
 724 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
 725 +#define __NR_reservation_destroy	__LSC(14)
 726 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
 727 +#define __NR_set_mc2_task_param		__LSC(15)
 728 +__SYSCALL(__NR_set_mc2_task_param,	sys_set_mc2_task_param)
 729 +#define __NR_set_page_color			__LSC(16)
 730 +__SYSCALL(__NR_set_page_color,		sys_set_page_color)
 731 +#define __NR_test_call				__LSC(17)
 732 +__SYSCALL(__NR_test_call, sys_test_call)
 733 +#define __NR_run_test				__LSC(18)
 734 +__SYSCALL(__NR_run_test, sys_run_test)
 735 +#define __NR_lock_buffer			__LSC(19)
 736 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
 737  
 738 -#define NR_litmus_syscalls 13
 739 +
 740 +#define NR_litmus_syscalls 20
 741 diff --git litmus/Makefile litmus/Makefile
 742 index 7970cd5..e274409 100644
 743 --- litmus/Makefile
 744 +++ litmus/Makefile
 745 @@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
 746  	    sync.o \
 747  	    rt_domain.o \
 748  	    edf_common.o \
 749 +		mc2_common.o \
 750  	    fp_common.o \
 751  	    fdso.o \
 752  	    locking.o \
 753 @@ -19,13 +20,18 @@ obj-y     = sched_plugin.o litmus.o \
 754  	    binheap.o \
 755  	    ctrldev.o \
 756  	    uncachedev.o \
 757 +		reservation.o \
 758 +		polling_reservations.o \
 759  	    sched_gsn_edf.o \
 760  	    sched_psn_edf.o \
 761 -	    sched_pfp.o
 762 +	    sched_pfp.o \
 763 +		sched_mc2.o \
 764 +		bank_proc.o \
 765 +	    color_shm.o \
 766 +		cache_proc.o
 767  
 768  obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 769  obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 770 -
 771  obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 772  obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 773  obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
 774 diff --git litmus/bank_proc.c litmus/bank_proc.c
 775 new file mode 100644
 776 index 0000000..6103611
 777 --- /dev/null
 778 +++ litmus/bank_proc.c
 779 @@ -0,0 +1,737 @@
 780 +/*
 781 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
 782 + *                This file keeps a pool of colored pages. Users can request pages with a
 783 + *		  specific color or bank number.
 784 + *                Part of the code is adapted from Jonathan Herman's code.
 785 + */
 786 +#include <linux/init.h>
 787 +#include <linux/types.h>
 788 +#include <linux/kernel.h>
 789 +#include <linux/module.h>
 790 +#include <linux/sysctl.h>
 791 +#include <linux/slab.h>
 792 +#include <linux/io.h>
 793 +#include <linux/mutex.h>
 794 +#include <linux/mm.h>
 795 +#include <linux/random.h>
 796 +
 797 +#include <litmus/litmus_proc.h>
 798 +#include <litmus/sched_trace.h>
 799 +#include <litmus/litmus.h>
 800 +
 801 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
 802 +
 803 +// This Address Decoding is used in imx6-sabredsd platform
 804 +#define BANK_MASK  0x38000000     
 805 +#define BANK_SHIFT  27
 806 +#define CACHE_MASK  0x0000f000      
 807 +#define CACHE_SHIFT 12
 808 +
 809 +#define PAGES_PER_COLOR 1024
 810 +unsigned int NUM_PAGE_LIST;  //8*16
 811 +
 812 +unsigned int number_banks;
 813 +unsigned int number_cachecolors;
 814 +
 815 +unsigned int set_partition_max = 0x0000ffff;
 816 +unsigned int set_partition_min = 0;
 817 +unsigned int bank_partition_max = 0x000000ff;
 818 +unsigned int bank_partition_min = 0;
 819 +
 820 +int show_page_pool = 0;
 821 +int refill_page_pool = 0;
 822 +spinlock_t reclaim_lock;
 823 +
 824 +unsigned int set_partition[9] = {
 825 +        0x00000003,  /* Core 0, and Level A*/
 826 +        0x00000003,  /* Core 0, and Level B*/
 827 +        0x0000000C,  /* Core 1, and Level A*/
 828 +        0x0000000C,  /* Core 1, and Level B*/
 829 +        0x00000030,  /* Core 2, and Level A*/
 830 +        0x00000030,  /* Core 2, and Level B*/
 831 +        0x000000C0,  /* Core 3, and Level A*/
 832 +        0x000000C0,  /* Core 3, and Level B*/
 833 +        0x0000ff00,  /* Level C */
 834 +};
 835 +
 836 +unsigned int bank_partition[9] = {
 837 +        0x00000010,  /* Core 0, and Level A*/
 838 +        0x00000010,  /* Core 0, and Level B*/
 839 +        0x00000020,  /* Core 1, and Level A*/
 840 +        0x00000020,  /* Core 1, and Level B*/
 841 +        0x00000040,  /* Core 2, and Level A*/
 842 +        0x00000040,  /* Core 2, and Level B*/
 843 +        0x00000080,  /* Core 3, and Level A*/
 844 +        0x00000080,  /* Core 3, and Level B*/
 845 +        0x0000000c,  /* Level C */
 846 +};
 847 +
 848 +unsigned int set_index[9] = {
 849 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 850 +};
 851 +
 852 +unsigned int bank_index[9] = {
 853 +    0, 0, 0, 0, 0, 0, 0, 0, 0
 854 +};
 855 +
 856 +struct mutex void_lockdown_proc;
 857 +
 858 +
 859 +/*
 860 + * Every page list contains a lock, a list head, and a counter recording how many pages it stores.
 861 + */ 
 862 +struct color_group {
 863 +	spinlock_t lock;
 864 +	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
 865 +	struct list_head list;
 866 +	atomic_t nr_pages;
 867 +};
 868 +
 869 +
 870 +static struct color_group *color_groups;
 871 +
 872 +/*
 873 + * Naive function to count the number of 1's
 874 + */
 875 +unsigned int counting_one_set(unsigned int v)
 876 +{
 877 +//    unsigned int v; // count the number of bits set in v
 878 +    unsigned int c; // c accumulates the total bits set in v
 879 +
 880 +    for (c = 0; v; v >>= 1)
 881 +    {
 882 +        c += v & 1;
 883 +    }
 884 +    return c;
 885 +}
 886 +
 887 +unsigned int two_exp(unsigned int e)
 888 +{
 889 +    unsigned int v = 1;
 890 +    for (; e>0; e-- )
 891 +    {
 892 +        v=v*2;
 893 +    }
 894 +    return v;
 895 +}
 896 +
 897 +unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
 898 +{
 899 +    unsigned int pos = 0;
 900 +
 901 +    while(true)
 902 +    {
 903 +        if(index ==0 && (bitmask & 1)==1)
 904 +        {
 905 +            break;
 906 +        }
 907 +        if(index !=0 && (bitmask & 1)==1){
 908 +            index--;
 909 +        }
 910 +        pos++;
 911 +        bitmask = bitmask >>1;
 912 +
 913 +    }
 914 +    return pos;
 915 +}
 916 +
 917 +
 918 +
 919 +/* Decoding page color, 0~15 */ 
 920 +static inline unsigned int page_color(struct page *page)
 921 +{
 922 +	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
 923 +}
 924 +
 925 +/* Decoding page bank number, 0~7 */ 
 926 +static inline unsigned int page_bank(struct page *page)
 927 +{
 928 +	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
 929 +}
 930 +
 931 +static inline unsigned int page_list_index(struct page *page)
 932 +{
 933 +    unsigned int idx;  
 934 +    idx = (page_color(page) + page_bank(page)*(number_cachecolors));
 935 +//    printk("address = %lx, ", page_to_phys(page));
 936 +//    printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx);
 937 +
 938 +    return idx; 
 939 +}
 940 +
 941 +
 942 +
 943 +/*
 944 + * Return the smallest number of pages held by any page list (checked from list index 32 onward).
 945 + */
 946 +static unsigned long smallest_nr_pages(void)
 947 +{
 948 +	unsigned long i, min_pages;
 949 +	struct color_group *cgroup;
 950 +	cgroup = &color_groups[16*2];
 951 +	min_pages =atomic_read(&cgroup->nr_pages); 
 952 +	for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
 953 +		cgroup = &color_groups[i];
 954 +		if (atomic_read(&cgroup->nr_pages) < min_pages)
 955 +			min_pages = atomic_read(&cgroup->nr_pages);
 956 +	}
 957 +	return min_pages;
 958 +}
 959 +
 960 +static void show_nr_pages(void)
 961 +{
 962 +	unsigned long i;
 963 +	struct color_group *cgroup;
 964 +	printk("show nr pages***************************************\n");
 965 +	for (i = 0; i < NUM_PAGE_LIST; ++i) {
 966 +		cgroup = &color_groups[i];
 967 +		printk("(%03d) =  %03d, ", i, atomic_read(&cgroup->nr_pages));
 968 +		if((i % 8) ==7){
 969 +		    printk("\n");
 970 +                }
 971 +	}
 972 +}
 973 +
 974 +/*
 975 + * Add a page to the page list (pool) matching its color and bank.
 976 + */
 977 +void add_page_to_color_list(struct page *page)
 978 +{
 979 +	const unsigned long color = page_list_index(page);
 980 +	struct color_group *cgroup = &color_groups[color];
 981 +	BUG_ON(in_list(&page->lru) || PageLRU(page));
 982 +	BUG_ON(page_count(page) > 1);
 983 +	spin_lock(&cgroup->lock);
 984 +	list_add_tail(&page->lru, &cgroup->list);
 985 +	atomic_inc(&cgroup->nr_pages);
 986 +	SetPageLRU(page);
 987 +	spin_unlock(&cgroup->lock);
 988 +}
 989 +
 990 +/*
 991 + * Replenish the page pool. 
 992 + * If a newly allocated page is one we want, it is pushed onto the correct page list;
 993 + * otherwise, it is freed.
 994 + */
 995 +static int do_add_pages(void)
 996 +{
 997 +	//printk("LITMUS do add pages\n");
 998 +	
 999 +	struct page *page, *page_tmp;
1000 +	LIST_HEAD(free_later);
1001 +	unsigned long color;
1002 +	int ret = 0;
1003 +	int i = 0;
1004 +	int free_counter = 0;
1005 +	unsigned long counter[128]= {0}; 
1006 +        
1007 +        //printk("Before refill : \n");
1008 +        //show_nr_pages();
1009 +
1010 +	// until all the page lists contain enough pages 
1011 +	//for (i =0; i<5; i++) {
1012 +	for (i=0; i< 1024*100;i++) {
1013 +	//while (smallest_nr_pages() < PAGES_PER_COLOR) {
1014 +       //         printk("smallest = %d\n", smallest_nr_pages());	
1015 +		page = alloc_page(GFP_HIGHUSER_MOVABLE);
1016 +	    //    page = alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
1017 +	
1018 +		if (unlikely(!page)) {
1019 +			printk(KERN_WARNING "Could not allocate pages.\n");
1020 +			ret = -ENOMEM;
1021 +			goto out;
1022 +		}
1023 +		color = page_list_index(page);
1024 +		counter[color]++;
1025 +	//	printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1026 +                //show_nr_pages();
1027 +		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
1028 +		//if ( PAGES_PER_COLOR && color>=16*2) {
1029 +			add_page_to_color_list(page);
1030 +	//		printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
1031 +		} else{
1032 +			// Pages here will be freed later 
1033 +			list_add_tail(&page->lru, &free_later);
1034 +			free_counter++;
1035 +		        //list_del(&page->lru);
1036 +		//        __free_page(page);
1037 +	//		printk("useless page(%d) = color %x, bank %x\n", color,  page_color(page), page_bank(page));
1038 +		}
1039 +               //show_nr_pages();
1040 +                /*
1041 +                if(free_counter >= PAGES_PER_COLOR)
1042 +                {
1043 +                    printk("free unwanted page list eariler");
1044 +                    free_counter = 0;
1045 +	            list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1046 +		        list_del(&page->lru);
1047 +		        __free_page(page);
1048 +	            }
1049 +
1050 +                    show_nr_pages();
1051 +                }
1052 +                */
1053 +        }
1054 +/*        printk("page counter = \n");
1055 +        for (i=0; i<128; i++)
1056 +        {
1057 +            printk("(%03d) = %4d, ", i , counter[i]);
1058 +            if(i%8 == 7){
1059 +                printk("\n");
1060 +            }
1061 +
1062 +        }
1063 +*/	
1064 +        //printk("After refill : \n");
1065 +        //show_nr_pages();
1066 +#if 1
1067 +	// Free the unwanted pages
1068 +	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1069 +		list_del(&page->lru);
1070 +		__free_page(page);
1071 +	}
1072 +#endif
1073 +out:
1074 +        return ret;
1075 +}
1076 +
1077 +/*
1078 + * Provide pages for replacement according to cache color.
1079 + * This should be the only allocation implementation here;
1080 + * other code should not call this function directly.
1081 + * 
1082 + */ 
1083 +static struct  page *new_alloc_page_color( unsigned long color)
1084 +{
1085 +//	printk("allocate new page color = %d\n", color);	
1086 +	struct color_group *cgroup;
1087 +	struct page *rPage = NULL;
1088 +		
1089 +	if( (color <0) || (color)>(number_cachecolors*number_banks -1)) {
1090 +		TRACE_CUR("Wrong color %lu\n", color);	
1091 +//		printk(KERN_WARNING "Wrong color %lu\n", color);
1092 +		goto out;
1093 +	}
1094 +
1095 +		
1096 +	cgroup = &color_groups[color];
1097 +	spin_lock(&cgroup->lock);
1098 +	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
1099 +		TRACE_CUR("No free %lu colored pages.\n", color);
1100 +//		printk(KERN_WARNING "no free %lu colored pages.\n", color);
1101 +		goto out_unlock;
1102 +	}
1103 +	rPage = list_first_entry(&cgroup->list, struct page, lru);
1104 +	BUG_ON(page_count(rPage) > 1);
1105 +	//get_page(rPage);
1106 +	list_del(&rPage->lru);
1107 +	atomic_dec(&cgroup->nr_pages);
1108 +	ClearPageLRU(rPage);
1109 +out_unlock:
1110 +	spin_unlock(&cgroup->lock);
1111 +out:
1112 +	if( smallest_nr_pages() == 0)
1113 +        {
1114 +		do_add_pages();
1115 +       //     printk("ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");        
1116 +        
1117 +        }
1118 +	return rPage;
1119 +}
1120 +
1121 +struct page* get_colored_page(unsigned long color)
1122 +{
1123 +	return new_alloc_page_color(color);
1124 +}
1125 +
1126 +/*
1127 + * Provide pages for replacement according to the node number:
1128 + * node = 0 for Level A tasks on CPU 0
1129 + * node = 1 for Level B tasks on CPU 0
1130 + * node = 2 for Level A tasks on CPU 1
1131 + * node = 3 for Level B tasks on CPU 1
1132 + * node = 4 for Level A tasks on CPU 2
1133 + * node = 5 for Level B tasks on CPU 2
1134 + * node = 6 for Level A tasks on CPU 3
1135 + * node = 7 for Level B tasks on CPU 3
1136 + * node = 8 for Level C tasks
1137 + */
1138 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
1139 +{
1140 +//	printk("allocate new page node = %d\n", node);	
1141 +//	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1142 +	struct color_group *cgroup;
1143 +	struct page *rPage = NULL;
1144 +	unsigned int color;
1145 +	
1146 +
1147 +        unsigned int idx = 0;
1148 +        idx += num_by_bitmask_index(set_partition[node], set_index[node]);
1149 +        idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
1150 +	//printk("node  = %d, idx = %d\n", node, idx);
1151 +
1152 +	rPage =  new_alloc_page_color(idx);
1153 +        
1154 +            
1155 +        set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
1156 +        bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
1157 +	return rPage; 
1158 +}
1159 +
1160 +
1161 +/*
1162 + * Reclaim pages.
1163 + */
1164 +void reclaim_page(struct page *page)
1165 +{
1166 +	const unsigned long color = page_list_index(page);
1167 +	unsigned long nr_reclaimed = 0;
1168 +	spin_lock(&reclaim_lock);
1169 +    	put_page(page);
1170 +	add_page_to_color_list(page);
1171 +
1172 +	spin_unlock(&reclaim_lock);
1173 +	printk("Reclaimed page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1174 +}
1175 +
1176 +
1177 +/*
1178 + * Initialize the numbers of banks and cache colors 
1179 + */ 
1180 +static int __init init_variables(void)
1181 +{
1182 +	number_banks = counting_one_set(BANK_MASK); 
1183 +	number_banks = two_exp(number_banks); 
1184 +
1185 +	number_cachecolors = counting_one_set(CACHE_MASK);
1186 +	number_cachecolors = two_exp(number_cachecolors);
1187 +	NUM_PAGE_LIST = number_banks * number_cachecolors; 
1188 +        printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
1189 +	mutex_init(&void_lockdown_proc);
1190 +	spin_lock_init(&reclaim_lock);
1191 +
1192 +}
1193 +
1194 +
1195 +/*
1196 + * Initialize the page pool 
1197 + */
1198 +static int __init init_color_groups(void)
1199 +{
1200 +	struct color_group *cgroup;
1201 +	unsigned long i;
1202 +	int err = 0;
1203 +
1204 +        printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
1205 +        color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
1206 +
1207 +	if (!color_groups) {
1208 +		printk(KERN_WARNING "Could not allocate color groups.\n");
1209 +		err = -ENOMEM;
1210 +	}else{
1211 +
1212 +		for (i = 0; i < NUM_PAGE_LIST; ++i) {
1213 +			cgroup = &color_groups[i];
1214 +			atomic_set(&cgroup->nr_pages, 0);
1215 +			INIT_LIST_HEAD(&cgroup->list);
1216 +			spin_lock_init(&cgroup->lock);
1217 +		}
1218 +	}
1219 +        return err;
1220 +}
1221 +
1222 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1223 +		size_t *lenp, loff_t *ppos)
1224 +{
1225 +	int ret = 0, i = 0;
1226 +	mutex_lock(&void_lockdown_proc);
1227 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1228 +	if (ret)
1229 +		goto out;
1230 +	if (write) {
1231 +            printk("New set Partition : \n");
1232 +	    for(i =0;i <9;i++)
1233 +            {
1234 +                set_index[i] = 0;
1235 +                printk("set[%d] = %x \n", i, set_partition[i]);
1236 +            }
1237 +	}
1238 +out:
1239 +	mutex_unlock(&void_lockdown_proc);
1240 +	return ret;
1241 +}
1242 +
1243 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1244 +		size_t *lenp, loff_t *ppos)
1245 +{
1246 +	int ret = 0, i = 0;
1247 +	mutex_lock(&void_lockdown_proc);
1248 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1249 +	if (ret)
1250 +		goto out;
1251 +	if (write) {
1252 +	    for(i =0;i <9;i++)
1253 +            {
1254 +                bank_index[i] = 0;
1255 +            }
1256 +	}
1257 +out:
1258 +	mutex_unlock(&void_lockdown_proc);
1259 +	return ret;
1260 +}
1261 +
1262 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1263 +		size_t *lenp, loff_t *ppos)
1264 +{
1265 +	int ret = 0, i = 0;
1266 +	mutex_lock(&void_lockdown_proc);
1267 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1268 +	if (ret)
1269 +		goto out;
1270 +	if (write) {
1271 +            show_nr_pages();
1272 +	}
1273 +out:
1274 +	mutex_unlock(&void_lockdown_proc);
1275 +	return ret;
1276 +}
1277 +
1278 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1279 +		size_t *lenp, loff_t *ppos)
1280 +{
1281 +	int ret = 0, i = 0;
1282 +	mutex_lock(&void_lockdown_proc);
1283 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1284 +	if (ret)
1285 +		goto out;
1286 +	if (write) {
1287 +            do_add_pages();
1288 +	}
1289 +out:
1290 +	mutex_unlock(&void_lockdown_proc);
1291 +	return ret;
1292 +}
1293 +
1294 +static struct ctl_table cache_table[] =
1295 +{
1296 +        
1297 +	{
1298 +		.procname	= "C0_LA_set",
1299 +		.mode		= 0666,
1300 +		.proc_handler	= set_partition_handler,
1301 +		.data		= &set_partition[0],
1302 +		.maxlen		= sizeof(set_partition[0]),
1303 +		.extra1		= &set_partition_min,
1304 +		.extra2		= &set_partition_max,
1305 +	},	
1306 +	{
1307 +		.procname	= "C0_LB_set",
1308 +		.mode		= 0666,
1309 +		.proc_handler	= set_partition_handler,
1310 +		.data		= &set_partition[1],
1311 +		.maxlen		= sizeof(set_partition[1]),
1312 +		.extra1		= &set_partition_min,
1313 +		.extra2		= &set_partition_max,
1314 +	},	
1315 +	{
1316 +		.procname	= "C1_LA_set",
1317 +		.mode		= 0666,
1318 +		.proc_handler	= set_partition_handler,
1319 +		.data		= &set_partition[2],
1320 +		.maxlen		= sizeof(set_partition[2]),
1321 +		.extra1		= &set_partition_min,
1322 +		.extra2		= &set_partition_max,
1323 +	},
1324 +	{
1325 +		.procname	= "C1_LB_set",
1326 +		.mode		= 0666,
1327 +		.proc_handler	= set_partition_handler,
1328 +		.data		= &set_partition[3],
1329 +		.maxlen		= sizeof(set_partition[3]),
1330 +		.extra1		= &set_partition_min,
1331 +		.extra2		= &set_partition_max,
1332 +	},
1333 +	{
1334 +		.procname	= "C2_LA_set",
1335 +		.mode		= 0666,
1336 +		.proc_handler	= set_partition_handler,
1337 +		.data		= &set_partition[4],
1338 +		.maxlen		= sizeof(set_partition[4]),
1339 +		.extra1		= &set_partition_min,
1340 +		.extra2		= &set_partition_max,
1341 +	},
1342 +	{
1343 +		.procname	= "C2_LB_set",
1344 +		.mode		= 0666,
1345 +		.proc_handler	= set_partition_handler,
1346 +		.data		= &set_partition[5],
1347 +		.maxlen		= sizeof(set_partition[5]),
1348 +		.extra1		= &set_partition_min,
1349 +		.extra2		= &set_partition_max,
1350 +	},
1351 +	{
1352 +		.procname	= "C3_LA_set",
1353 +		.mode		= 0666,
1354 +		.proc_handler	= set_partition_handler,
1355 +		.data		= &set_partition[6],
1356 +		.maxlen		= sizeof(set_partition[6]),
1357 +		.extra1		= &set_partition_min,
1358 +		.extra2		= &set_partition_max,
1359 +	},
1360 +	{
1361 +		.procname	= "C3_LB_set",
1362 +		.mode		= 0666,
1363 +		.proc_handler	= set_partition_handler,
1364 +		.data		= &set_partition[7],
1365 +		.maxlen		= sizeof(set_partition[7]),
1366 +		.extra1		= &set_partition_min,
1367 +		.extra2		= &set_partition_max,
1368 +	},	
1369 +	{
1370 +		.procname	= "Call_LC_set",
1371 +		.mode		= 0666,
1372 +		.proc_handler	= set_partition_handler,
1373 +		.data		= &set_partition[8],
1374 +		.maxlen		= sizeof(set_partition[8]),
1375 +		.extra1		= &set_partition_min,
1376 +		.extra2		= &set_partition_max,
1377 +	},	
1378 +	{
1379 +		.procname	= "C0_LA_bank",
1380 +		.mode		= 0666,
1381 +		.proc_handler	= bank_partition_handler,
1382 +		.data		= &bank_partition[0],
1383 +		.maxlen		= sizeof(set_partition[0]),
1384 +		.extra1		= &bank_partition_min,
1385 +		.extra2		= &bank_partition_max,
1386 +	},
1387 +	{
1388 +		.procname	= "C0_LB_bank",
1389 +		.mode		= 0666,
1390 +		.proc_handler	= bank_partition_handler,
1391 +		.data		= &bank_partition[1],
1392 +		.maxlen		= sizeof(set_partition[1]),
1393 +		.extra1		= &bank_partition_min,
1394 +		.extra2		= &bank_partition_max,
1395 +	},		
1396 +	{
1397 +		.procname	= "C1_LA_bank",
1398 +		.mode		= 0666,
1399 +		.proc_handler	= bank_partition_handler,
1400 +		.data		= &bank_partition[2],
1401 +		.maxlen		= sizeof(set_partition[2]),
1402 +		.extra1		= &bank_partition_min,
1403 +		.extra2		= &bank_partition_max,
1404 +	},
1405 +	{
1406 +		.procname	= "C1_LB_bank",
1407 +		.mode		= 0666,
1408 +		.proc_handler	= bank_partition_handler,
1409 +		.data		= &bank_partition[3],
1410 +		.maxlen		= sizeof(set_partition[3]),
1411 +		.extra1		= &bank_partition_min,
1412 +		.extra2		= &bank_partition_max,
1413 +	},
1414 +	{
1415 +		.procname	= "C2_LA_bank",
1416 +		.mode		= 0666,
1417 +		.proc_handler	= bank_partition_handler,
1418 +		.data		= &bank_partition[4],
1419 +		.maxlen		= sizeof(set_partition[4]),
1420 +		.extra1		= &bank_partition_min,
1421 +		.extra2		= &bank_partition_max,
1422 +	},	
1423 +	{
1424 +		.procname	= "C2_LB_bank",
1425 +		.mode		= 0666,
1426 +		.proc_handler	= bank_partition_handler,
1427 +		.data		= &bank_partition[5],
1428 +		.maxlen		= sizeof(set_partition[5]),
1429 +		.extra1		= &bank_partition_min,
1430 +		.extra2		= &bank_partition_max,
1431 +	},		
1432 +	{
1433 +		.procname	= "C3_LA_bank",
1434 +		.mode		= 0666,
1435 +		.proc_handler	= bank_partition_handler,
1436 +		.data		= &bank_partition[6],
1437 +		.maxlen		= sizeof(set_partition[6]),
1438 +		.extra1		= &bank_partition_min,
1439 +		.extra2		= &bank_partition_max,
1440 +	},	
1441 +	{
1442 +		.procname	= "C3_LB_bank",
1443 +		.mode		= 0666,
1444 +		.proc_handler	= bank_partition_handler,
1445 +		.data		= &bank_partition[7],
1446 +		.maxlen		= sizeof(set_partition[7]),
1447 +		.extra1		= &bank_partition_min,
1448 +		.extra2		= &bank_partition_max,
1449 +	},	
1450 +	{
1451 +		.procname	= "Call_LC_bank",
1452 +		.mode		= 0666,
1453 +		.proc_handler	= bank_partition_handler,
1454 +		.data		= &bank_partition[8],
1455 +		.maxlen		= sizeof(set_partition[8]),
1456 +		.extra1		= &bank_partition_min,
1457 +		.extra2		= &bank_partition_max,
1458 +	},	
1459 +	{
1460 +		.procname	= "show_page_pool",
1461 +		.mode		= 0666,
1462 +		.proc_handler	= show_page_pool_handler,
1463 +		.data		= &show_page_pool,
1464 +		.maxlen		= sizeof(show_page_pool),
1465 +	},		{
1466 +		.procname	= "refill_page_pool",
1467 +		.mode		= 0666,
1468 +		.proc_handler	= refill_page_pool_handler,
1469 +		.data		= &refill_page_pool,
1470 +		.maxlen		= sizeof(refill_page_pool),
1471 +	},	
1472 +	{ }
1473 +};
1474 +
1475 +static struct ctl_table litmus_dir_table[] = {
1476 +	{
1477 +		.procname	= "litmus",
1478 + 		.mode		= 0555,
1479 +		.child		= cache_table,
1480 +	},
1481 +	{ }
1482 +};
1483 +
1484 +
1485 +static struct ctl_table_header *litmus_sysctls;
1486 +
1487 +
1488 +/*
1489 + * Initialize this proc
1490 + */
1491 +static int __init litmus_color_init(void)
1492 +{
1493 +	int err=0;
1494 +        printk("Init bankproc.c\n");
1495 +
1496 +	init_variables();
1497 +
1498 +	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
1499 +
1500 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
1501 +	if (!litmus_sysctls) {
1502 +		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
1503 +		err = -EFAULT;
1504 +		goto out;
1505 +	}
1506 +
1507 +	init_color_groups();			
1508 +	do_add_pages();
1509 +
1510 +	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
1511 +out:
1512 +	return err;
1513 +}
1514 +
1515 +module_init(litmus_color_init);
1516 +
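
As a worked example of the imx6 address decoding used by page_color(), page_bank(), and page_list_index() above (a standalone sketch; the sample physical address is arbitrary):

	#include <stdio.h>

	#define BANK_MASK   0x38000000UL
	#define BANK_SHIFT  27
	#define CACHE_MASK  0x0000f000UL
	#define CACHE_SHIFT 12

	int main(void)
	{
		unsigned long phys = 0x18c3a000UL;                        /* arbitrary physical address */
		unsigned int bank  = (phys & BANK_MASK)  >> BANK_SHIFT;   /* 0x18000000 >> 27 = 3  */
		unsigned int color = (phys & CACHE_MASK) >> CACHE_SHIFT;  /* 0xa000 >> 12     = 10 */
		/* page_list_index(): color + bank * number_cachecolors (16 colors) */
		unsigned int idx   = color + bank * 16;                   /* 10 + 3*16 = 58 */
		printf("bank=%u color=%u list_index=%u\n", bank, color, idx);
		return 0;
	}
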
1517 diff --git litmus/cache_proc.c litmus/cache_proc.c
1518 new file mode 100644
1519 index 0000000..afc0319
1520 --- /dev/null
1521 +++ litmus/cache_proc.c
1522 @@ -0,0 +1,1403 @@
1523 +#include <asm/uaccess.h>
1524 +#include <linux/uaccess.h>
1525 +#include <linux/init.h>
1526 +#include <linux/types.h>
1527 +#include <linux/kernel.h>
1528 +#include <linux/module.h>
1529 +#include <linux/sysctl.h>
1530 +#include <linux/slab.h>
1531 +#include <linux/io.h>
1532 +#include <linux/mutex.h>
1533 +#include <linux/time.h>
1534 +#include <linux/random.h>
1535 +
1536 +#include <litmus/litmus_proc.h>
1537 +#include <litmus/sched_trace.h>
1538 +#include <litmus/cache_proc.h>
1539 +#include <litmus/mc2_common.h>
1540 +#include <litmus/litmus.h>
1541 +
1542 +#include <asm/hardware/cache-l2x0.h>
1543 +#include <asm/cacheflush.h>
1544 +
1545 +
1546 +#define UNLOCK_ALL	0x00000000 /* allocation in any way */
1547 +#define LOCK_ALL        (~UNLOCK_ALL)
1548 +#define MAX_NR_WAYS	16
1549 +#define MAX_NR_COLORS	16
1550 +#define CACHELINE_SIZE 32
1551 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
1552 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
1553 +
1554 +typedef struct cacheline
1555 +{
1556 +        int line[INTS_IN_CACHELINE];
1557 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
1558 +
1559 +void mem_lock(u32 lock_val, int cpu);
1560 +
1561 +/*
1562 + * unlocked_way[i] : lockdown register value that permits allocation only in way i
1563 + *
1564 + * 0 = allocation can occur in the corresponding way
1565 + * 1 = allocation cannot occur in the corresponding way
1566 + */
1567 +u32 unlocked_way[MAX_NR_WAYS]  = {
1568 +	0xFFFFFFFE, /* way 0 unlocked */
1569 +	0xFFFFFFFD,
1570 +	0xFFFFFFFB,
1571 +	0xFFFFFFF7,
1572 +	0xFFFFFFEF, /* way 4 unlocked */
1573 +	0xFFFFFFDF,
1574 +	0xFFFFFFBF,
1575 +	0xFFFFFF7F,
1576 +	0xFFFFFEFF, /* way 8 unlocked */
1577 +	0xFFFFFDFF,
1578 +	0xFFFFFBFF,
1579 +	0xFFFFF7FF,
1580 +	0xFFFFEFFF, /* way 12 unlocked */
1581 +	0xFFFFDFFF,
1582 +	0xFFFFBFFF,
1583 +	0xFFFF7FFF,
1584 +};
1585 +
1586 +u32 nr_unlocked_way[MAX_NR_WAYS+1]  = {
1587 +	0x0000FFFF, /* all ways are locked. usable = 0*/
1588 +	0x0000FFFE, /* way ~0 unlocked. usable = 1 */
1589 +	0x0000FFFC,
1590 +	0x0000FFF8,
1591 +	0x0000FFF0,
1592 +	0x0000FFE0,
1593 +	0x0000FFC0,
1594 +	0x0000FF80,
1595 +	0x0000FF00,
1596 +	0x0000FE00,
1597 +	0x0000FC00,
1598 +	0x0000F800,
1599 +	0x0000F000,
1600 +	0x0000E000,
1601 +	0x0000C000,
1602 +	0x00008000,
1603 +	0x00000000, /* way ~15 unlocked. usable = 16 */
1604 +};
1605 +
1606 +u32 way_partition[4] = {
1607 +	0xfffffff0, /* cpu0 */
1608 +	0xffffff0f, /* cpu1 */
1609 +	0xfffff0ff, /* cpu2 */
1610 +	0xffff0fff, /* cpu3 */
1611 +};
1612 +
1613 +u32 way_partitions[9] = {
1614 +	0xffff0003, /* cpu0 A */
1615 +	0xffff0003, /* cpu0 B */
1616 +	0xffff000C, /* cpu1 A */
1617 +	0xffff000C, /* cpu1 B */
1618 +	0xffff0030, /* cpu2 A */
1619 +	0xffff0030, /* cpu2 B */
1620 +	0xffff00C0, /* cpu3 A */
1621 +	0xffff00C0, /* cpu3 B */
1622 +	0xffffff00, /* lv C */
1623 +};
1624 +
1625 +u32 prev_lockdown_d_reg[5] = {
1626 +	0x0000FF00,
1627 +	0x0000FF00,
1628 +	0x0000FF00,
1629 +	0x0000FF00,
1630 +	0x000000FF, /* share with level-C */
1631 +};
1632 +
1633 +u32 prev_lockdown_i_reg[5] = {
1634 +	0x0000FF00,
1635 +	0x0000FF00,
1636 +	0x0000FF00,
1637 +	0x0000FF00,
1638 +	0x000000FF, /* share with level-C */
1639 +};
1640 +
1641 +u32 prev_lbm_i_reg[8] = {
1642 +	0x00000000,
1643 +	0x00000000,
1644 +	0x00000000,
1645 +	0x00000000,
1646 +	0x00000000,
1647 +	0x00000000,
1648 +	0x00000000,
1649 +	0x00000000,
1650 +};
1651 +
1652 +u32 prev_lbm_d_reg[8] = {
1653 +	0x00000000,
1654 +	0x00000000,
1655 +	0x00000000,
1656 +	0x00000000,
1657 +	0x00000000,
1658 +	0x00000000,
1659 +	0x00000000,
1660 +	0x00000000,
1661 +};
1662 +
1663 +static void __iomem *cache_base;
1664 +static void __iomem *lockreg_d;
1665 +static void __iomem *lockreg_i;
1666 +
1667 +static u32 cache_id;
1668 +
1669 +struct mutex actlr_mutex;
1670 +struct mutex l2x0_prefetch_mutex;
1671 +struct mutex lockdown_proc;
1672 +static u32 way_partition_min;
1673 +static u32 way_partition_max;
1674 +
1675 +static int zero = 0;
1676 +static int one = 1;
1677 +
1678 +static int l1_prefetch_proc;
1679 +static int l2_prefetch_hint_proc;
1680 +static int l2_double_linefill_proc;
1681 +static int l2_data_prefetch_proc;
1682 +static int os_isolation;
1683 +static int use_part;
1684 +
1685 +u32 lockdown_reg[9] = {
1686 +	0x00000000,
1687 +	0x00000000,
1688 +	0x00000000,
1689 +	0x00000000,
1690 +	0x00000000,
1691 +	0x00000000,
1692 +	0x00000000,
1693 +	0x00000000,
1694 +};
1695 +	
1696 +
1697 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
1698 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
1699 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1700 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
1701 +			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
1702 +			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
1703 +
1704 +int lock_all;
1705 +int nr_lockregs;
1706 +static raw_spinlock_t cache_lock;
1707 +static raw_spinlock_t prefetch_lock;
1708 +static void ***flusher_pages = NULL;
1709 +
1710 +extern void l2c310_flush_all(void);
1711 +
1712 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
1713 +{
1714 +	/* wait for cache operation by line or way to complete */
1715 +	while (readl_relaxed(reg) & mask)
1716 +		cpu_relax();
1717 +}
1718 +
1719 +#ifdef CONFIG_CACHE_L2X0 
1720 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
1721 +{
1722 +	/* cache operations by line are atomic on PL310 */
1723 +}
1724 +#else
1725 +#define cache_wait	cache_wait_way
1726 +#endif
1727 +
1728 +static inline void cache_sync(void)
1729 +{
1730 +	void __iomem *base = cache_base;
1731 +
1732 +	writel_relaxed(0, base + L2X0_CACHE_SYNC);
1733 +	cache_wait(base + L2X0_CACHE_SYNC, 1);
1734 +}
1735 +
1736 +static void print_lockdown_registers(int cpu)
1737 +{
1738 +	int i;
1739 +	//for (i = 0; i < nr_lockregs; i++) {
1740 +	for (i = 0; i < 4; i++) {
1741 +		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
1742 +				i, readl_relaxed(ld_d_reg(i)));
1743 +		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
1744 +				i, readl_relaxed(ld_i_reg(i)));
1745 +	}
1746 +}
1747 +
1748 +static void test_lockdown(void *ignore)
1749 +{
1750 +	int i, cpu;
1751 +
1752 +	cpu = smp_processor_id();
1753 +	printk("Start lockdown test on CPU %d.\n", cpu);
1754 +
1755 +	for (i = 0; i < nr_lockregs; i++) {
1756 +		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
1757 +		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
1758 +	}
1759 +
1760 +	printk("Lockdown initial state:\n");
1761 +	print_lockdown_registers(cpu);
1762 +	printk("---\n");
1763 +
1764 +	for (i = 0; i < nr_lockregs; i++) {
1765 +		writel_relaxed(1, ld_d_reg(i));
1766 +		writel_relaxed(2, ld_i_reg(i));
1767 +	}
1768 +	printk("Lockdown all data=1 instr=2:\n");
1769 +	print_lockdown_registers(cpu);
1770 +	printk("---\n");
1771 +
1772 +	for (i = 0; i < nr_lockregs; i++) {
1773 +		writel_relaxed((1 << i), ld_d_reg(i));
1774 +		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
1775 +	}
1776 +	printk("Lockdown varies:\n");
1777 +	print_lockdown_registers(cpu);
1778 +	printk("---\n");
1779 +
1780 +	for (i = 0; i < nr_lockregs; i++) {
1781 +		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
1782 +		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
1783 +	}
1784 +	printk("Lockdown all zero:\n");
1785 +	print_lockdown_registers(cpu);
1786 +
1787 +	printk("End lockdown test.\n");
1788 +}
1789 +
1790 +void litmus_setup_lockdown(void __iomem *base, u32 id)
1791 +{
1792 +	cache_base = base;
1793 +	cache_id = id;
1794 +	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
1795 +	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
1796 +    
1797 +	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
1798 +		nr_lockregs = 8;
1799 +	} else {
1800 +		printk("Unknown cache ID!\n");
1801 +		nr_lockregs = 1;
1802 +	}
1803 +	
1804 +	mutex_init(&actlr_mutex);
1805 +	mutex_init(&l2x0_prefetch_mutex);
1806 +	mutex_init(&lockdown_proc);
1807 +	raw_spin_lock_init(&cache_lock);
1808 +	raw_spin_lock_init(&prefetch_lock);
1809 +	
1810 +	test_lockdown(NULL);
1811 +}
1812 +
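     +/*
     + * sysctl handler for the C<cpu>_L<level>_way entries defined below.
     + * Illustrative usage (the path follows from the sysctl tree registered
     + * at the end of this file):
     + *
     + *   echo 0x00000003 > /proc/sys/litmus/C0_LA_way
     + *
     + * allows CPU 0's level-A reservations to allocate into ways 0 and 1 only.
     + * Any successful write immediately re-applies the four level-A masks to
     + * the hardware lockdown registers and flushes the L2; the level-B masks
     + * take effect when do_partition() is called.
     + */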
1813 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1814 +		size_t *lenp, loff_t *ppos)
1815 +{
1816 +	int ret = 0, i;
1817 +	unsigned long flags;
1818 +	
1819 +	mutex_lock(&lockdown_proc);
1820 +	
1821 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1822 +	if (ret)
1823 +		goto out;
1824 +	
1825 +	if (write) {
1826 +		printk("Way-partition settings:\n");
1827 +		for (i = 0; i < 9; i++) {
1828 +			printk("0x%08X\n", way_partitions[i]);
1829 +		}
1830 +		for (i = 0; i < 4; i++) {
1831 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1832 +				       i * L2X0_LOCKDOWN_STRIDE);
1833 +			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1834 +				       i * L2X0_LOCKDOWN_STRIDE);
1835 +		}
1836 +	}
1837 +	
1838 +	local_irq_save(flags);
1839 +	print_lockdown_registers(smp_processor_id());
1840 +	l2c310_flush_all();
1841 +	local_irq_restore(flags);
1842 +out:
1843 +	mutex_unlock(&lockdown_proc);
1844 +	return ret;
1845 +}
1846 +
1847 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
1848 +		size_t *lenp, loff_t *ppos)
1849 +{
1850 +	int ret = 0, i;
1851 +	unsigned long flags;
1852 +	
1853 +	mutex_lock(&lockdown_proc);
1854 +	
1855 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1856 +	if (ret)
1857 +		goto out;
1858 +	
1859 +	if (write && lock_all == 1) {
1860 +		for (i = 0; i < nr_lockregs; i++) {
1861 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1862 +				       i * L2X0_LOCKDOWN_STRIDE);
1863 +			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1864 +				       i * L2X0_LOCKDOWN_STRIDE);
1865 +		}
1866 +/*		
1867 +		for (i = 0; i < nr_lockregs;  i++) {
1868 +			barrier();
1869 +			mem_lock(LOCK_ALL, i);
1870 +			barrier();
1871 +			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
1872 +			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
1873 +		}
1874 +*/		
1875 +	}
1876 +	if (write && lock_all == 0) {
1877 +		for (i = 0; i < nr_lockregs; i++) {
1878 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1879 +				       i * L2X0_LOCKDOWN_STRIDE);
1880 +			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1881 +				       i * L2X0_LOCKDOWN_STRIDE);
1882 +		}
1883 +/*
1884 +		for (i = 0; i < nr_lockregs;  i++) {
1885 +			barrier();
1886 +			mem_lock(UNLOCK_ALL, i);
1887 +			barrier();
1888 +			//writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
1889 +			//writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
1890 +		}
1891 +*/
1892 +	}
1893 +	printk("LOCK_ALL HANDLER\n");
1894 +	local_irq_save(flags);
1895 +	print_lockdown_registers(smp_processor_id());
1896 +	l2c310_flush_all();
1897 +	local_irq_restore(flags);
1898 +out:
1899 +	mutex_unlock(&lockdown_proc);
1900 +	return ret;
1901 +}
1902 +
1903 +void cache_lockdown(u32 lock_val, int cpu)
1904 +{
1905 +	//unsigned long flags;
1906 +	//raw_spin_lock_irqsave(&cache_lock, flags);
1907 +
1908 +	__asm__ __volatile__ (
1909 +"	str	%[lockval], [%[dcachereg]]\n"
1910 +"	str	%[lockval], [%[icachereg]]\n"
1911 +	: 
1912 +	: [dcachereg] "r" (ld_d_reg(cpu)),
1913 +	  [icachereg] "r" (ld_i_reg(cpu)),
1914 +	  [lockval] "r" (lock_val)
1915 +	: "cc");
1916 +
1917 +	//raw_spin_unlock_irqrestore(&cache_lock, flags);
1918 +}
1919 +
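     +/*
     + * Apply the way partition for criticality level @lv on CPU @cpu.  The
     + * value written to the lockdown registers is the bitwise complement of
     + * the configured way mask (restricted to 16 ways), since a set lockdown
     + * bit forbids allocation in the corresponding way.  Level C and the OS
     + * share the single mask in way_partitions[8].  flush_cache(0) then
     + * evicts stale lines from the ways the partition may allocate into.
     + */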
1920 +void do_partition(enum crit_level lv, int cpu)
1921 +{
1922 +	u32 regs;
1923 +	unsigned long flags;
1924 +	
1925 +	if (lock_all || !use_part)
1926 +		return;
1927 +	raw_spin_lock_irqsave(&cache_lock, flags);
1928 +	switch(lv) {
1929 +		case CRIT_LEVEL_A:
1930 +			regs = ~way_partitions[cpu*2];
1931 +			regs &= 0x0000ffff;
1932 +			break;
1933 +		case CRIT_LEVEL_B:
1934 +			regs = ~way_partitions[cpu*2+1];
1935 +			regs &= 0x0000ffff;
1936 +			break;
1937 +		case CRIT_LEVEL_C:
1938 +		case NUM_CRIT_LEVELS:
1939 +			regs = ~way_partitions[8];
1940 +			regs &= 0x0000ffff;
1941 +			break;
1942 +		default:
1943 +			BUG();
1944 +
1945 +	}
1946 +	barrier();
1947 +	//cache_lockdown(regs, cpu);
1948 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
1949 +	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
1950 +	barrier();
1951 +
1952 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
1953 +	
1954 +	flush_cache(0);
1955 +}
1956 +
1957 +void lock_cache(int cpu, u32 val)
1958 +{
1959 +	unsigned long flags;
1960 +	
1961 +	local_irq_save(flags);
1962 +	if (val != 0xffffffff) {
1963 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
1964 +					   cpu * L2X0_LOCKDOWN_STRIDE);
1965 +		writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
1966 +					   cpu * L2X0_LOCKDOWN_STRIDE);
1967 +	}
1968 +	else {
1969 +		int i;
1970 +		for (i = 0; i < 4; i++)
1971 +			do_partition(CRIT_LEVEL_A, i);
1972 +	}
1973 +	local_irq_restore(flags);
1974 +}
1975 +
1976 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
1977 +		size_t *lenp, loff_t *ppos)
1978 +{
1979 +	int ret = 0;
1980 +	
1981 +	mutex_lock(&lockdown_proc);
1982 +
1983 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1984 +	if (ret)
1985 +		goto out;
1986 +	
1987 +
1988 +	printk("USE_PART HANDLER = %d\n", use_part);
1989 +
1990 +out:
1991 +	mutex_unlock(&lockdown_proc);
1992 +	return ret;
1993 +}
1994 +
1995 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
1996 +		size_t *lenp, loff_t *ppos)
1997 +{
1998 +	int ret = 0;
1999 +	
2000 +	mutex_lock(&lockdown_proc);
2001 +	
2002 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2003 +	if (ret)
2004 +		goto out;
2005 +	
2006 +
2007 +	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
2008 +
2009 +out:
2010 +	mutex_unlock(&lockdown_proc);
2011 +	return ret;
2012 +}
2013 +
2014 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
2015 +		size_t *lenp, loff_t *ppos)
2016 +{
2017 +	int ret = 0, i;
2018 +	
2019 +	mutex_lock(&lockdown_proc);
2020 +	
2021 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2022 +	if (ret)
2023 +		goto out;
2024 +	
2025 +	if (write) {
2026 +		for (i = 0; i < nr_lockregs; i++) {
2027 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2028 +				       i * L2X0_LOCKDOWN_STRIDE);
2029 +			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2030 +				       i * L2X0_LOCKDOWN_STRIDE);
2031 +		}
2032 +	}
2033 +
2034 +out:
2035 +	mutex_unlock(&lockdown_proc);
2036 +	return ret;
2037 +}
2038 +
2039 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
2040 +		size_t *lenp, loff_t *ppos)
2041 +{
2042 +	int ret = 0, i;
2043 +	
2044 +	mutex_lock(&lockdown_proc);
2045 +	
2046 +	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2047 +	if (ret)
2048 +		goto out;
2049 +	
2050 +	if (write) {
2051 +		for (i = 0; i < nr_lockregs; i++) {
2052 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2053 +				       i * L2X0_LOCKDOWN_STRIDE);
2054 +			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2055 +				       i * L2X0_LOCKDOWN_STRIDE);
2056 +		}
2057 +	}
2058 +
2059 +out:
2060 +	mutex_unlock(&lockdown_proc);
2061 +	return ret;
2062 +}
2063 +
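     +/*
     + * OS-isolation hooks intended to bracket interrupt handling: when
     + * /proc/sys/litmus/os_isolation is enabled, the handling CPU's lockdown
     + * registers are switched to the interrupt/OS configuration held in
     + * way_partitions[8] for the duration of the handler, and the previous
     + * per-task values are restored on exit.
     + */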
2064 +inline void enter_irq_mode(void)
2065 +{
2066 +	int cpu = smp_processor_id();
2067 +
2068 +	if (os_isolation == 0)
2069 +		return;	
2070 +
2071 +	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2072 +	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2073 +	
2074 +	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
2075 +	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
2076 +}
2077 +
2078 +inline void exit_irq_mode(void)
2079 +{
2080 +	int cpu = smp_processor_id();
2081 +
2082 +	if (os_isolation == 0)
2083 +		return;
2084 +	writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
2085 +	writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));	
2086 +}
2087 +
2088 +/* Operate on the Cortex-A9's ACTLR register */
2089 +#define ACTLR_L2_PREFETCH_HINT	(1 << 1)
2090 +#define ACTLR_L1_PREFETCH	(1 << 2)
2091 +
2092 +/*
2093 + * Change the ACTLR.
2094 + * @mode	- If 1 (0), set (clear) the bit given in @mask in the ACTLR.
2095 + * @mask	- A mask in which one bit is set to operate on the ACTLR.
2096 + */
2097 +static void actlr_change(int mode, int mask)
2098 +{
2099 +	u32 orig_value, new_value, reread_value;
2100 +
2101 +	if (0 != mode && 1 != mode) {
2102 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2103 +				__FUNCTION__);
2104 +		return;
2105 +	}
2106 +
2107 +	/* get the original value */
2108 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
2109 +
2110 +	if (0 == mode)
2111 +		new_value = orig_value & ~(mask);
2112 +	else
2113 +		new_value = orig_value | mask;
2114 +
2115 +	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
2116 +	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
2117 +
2118 +	printk("ACTLR: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2119 +			orig_value, new_value, reread_value);
2120 +}
2121 +
2122 +int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
2123 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2124 +{
2125 +	int ret, mode;
2126 +
2127 +	mutex_lock(&actlr_mutex);
2128 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2129 +
2130 +	if (!ret && write) {
2131 +		mode = *((int*)table->data);
2132 +		actlr_change(mode, ACTLR_L1_PREFETCH);
2133 +	}
2134 +	mutex_unlock(&actlr_mutex);
2135 +
2136 +	return ret;
2137 +}
2138 +
2139 +int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
2140 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2141 +{
2142 +	int ret, mode;
2143 +
2144 +	mutex_lock(&actlr_mutex);
2145 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2146 +	if (!ret && write) {
2147 +		mode = *((int*)table->data);
2148 +		actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
2149 +	}
2150 +	mutex_unlock(&actlr_mutex);
2151 +
2152 +	return ret;
2153 +}
2154 +
2155 +
2156 +/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
2157 +#define L2X0_PREFETCH_DOUBLE_LINEFILL	(1 << 30)
2158 +#define L2X0_PREFETCH_INST_PREFETCH	(1 << 29)
2159 +#define L2X0_PREFETCH_DATA_PREFETCH	(1 << 28)
2160 +static void l2x0_prefetch_change(int mode, int mask)
2161 +{
2162 +	u32 orig_value, new_value, reread_value;
2163 +
2164 +	if (0 != mode && 1 != mode) {
2165 +		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
2166 +				__FUNCTION__);
2167 +		return;
2168 +	}
2169 +
2170 +	orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2171 +
2172 +	if (0 == mode)
2173 +		new_value = orig_value & ~(mask);
2174 +	else
2175 +		new_value = orig_value | mask;
2176 +
2177 +	writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
2178 +	reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2179 +
2180 +	printk("l2x0 prefetch: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
2181 +			orig_value, new_value, reread_value);
2182 +}
2183 +
2184 +int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
2185 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2186 +{
2187 +	int ret, mode;
2188 +
2189 +	mutex_lock(&l2x0_prefetch_mutex);
2190 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2191 +	if (!ret && write) {
2192 +		mode = *((int*)table->data);
2193 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
2194 +	}
2195 +	mutex_unlock(&l2x0_prefetch_mutex);
2196 +
2197 +	return ret;
2198 +}
2199 +
2200 +int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
2201 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2202 +{
2203 +	int ret, mode;
2204 +
2205 +	mutex_lock(&l2x0_prefetch_mutex);
2206 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2207 +	if (!ret && write) {
2208 +		mode = *((int*)table->data);
2209 +		l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
2210 +	}
2211 +	mutex_unlock(&l2x0_prefetch_mutex);
2212 +
2213 +	return ret;
2214 +}
2215 +
2216 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2217 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2218 +
2219 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2220 +		void __user *buffer, size_t *lenp, loff_t *ppos);
2221 +		
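     +/*
     + * Entries exported under /proc/sys/litmus/: C<cpu>_LA_way and
     + * C<cpu>_LB_way hold the per-CPU way masks for criticality levels A and
     + * B, and Call_LC_way the mask shared by level C and the OS.  lock_all,
     + * use_part and os_isolation are 0/1 switches, lockdown_reg_0..3 and
     + * lockdown_regs expose raw lockdown values (lockdown_regs applies
     + * lockdown_reg[8] to every CPU), and do_perf_test / setup_flusher
     + * trigger the helpers defined later in this file.
     + */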
2222 +static struct ctl_table cache_table[] =
2223 +{
2224 +	{
2225 +		.procname	= "C0_LA_way",
2226 +		.mode		= 0666,
2227 +		.proc_handler	= way_partition_handler,
2228 +		.data		= &way_partitions[0],
2229 +		.maxlen		= sizeof(way_partitions[0]),
2230 +		.extra1		= &way_partition_min,
2231 +		.extra2		= &way_partition_max,
2232 +	},	
2233 +	{
2234 +		.procname	= "C0_LB_way",
2235 +		.mode		= 0666,
2236 +		.proc_handler	= way_partition_handler,
2237 +		.data		= &way_partitions[1],
2238 +		.maxlen		= sizeof(way_partitions[1]),
2239 +		.extra1		= &way_partition_min,
2240 +		.extra2		= &way_partition_max,
2241 +	},	
2242 +	{
2243 +		.procname	= "C1_LA_way",
2244 +		.mode		= 0666,
2245 +		.proc_handler	= way_partition_handler,
2246 +		.data		= &way_partitions[2],
2247 +		.maxlen		= sizeof(way_partitions[2]),
2248 +		.extra1		= &way_partition_min,
2249 +		.extra2		= &way_partition_max,
2250 +	},
2251 +	{
2252 +		.procname	= "C1_LB_way",
2253 +		.mode		= 0666,
2254 +		.proc_handler	= way_partition_handler,
2255 +		.data		= &way_partitions[3],
2256 +		.maxlen		= sizeof(way_partitions[3]),
2257 +		.extra1		= &way_partition_min,
2258 +		.extra2		= &way_partition_max,
2259 +	},
2260 +	{
2261 +		.procname	= "C2_LA_way",
2262 +		.mode		= 0666,
2263 +		.proc_handler	= way_partition_handler,
2264 +		.data		= &way_partitions[4],
2265 +		.maxlen		= sizeof(way_partitions[4]),
2266 +		.extra1		= &way_partition_min,
2267 +		.extra2		= &way_partition_max,
2268 +	},
2269 +	{
2270 +		.procname	= "C2_LB_way",
2271 +		.mode		= 0666,
2272 +		.proc_handler	= way_partition_handler,
2273 +		.data		= &way_partitions[5],
2274 +		.maxlen		= sizeof(way_partitions[5]),
2275 +		.extra1		= &way_partition_min,
2276 +		.extra2		= &way_partition_max,
2277 +	},
2278 +	{
2279 +		.procname	= "C3_LA_way",
2280 +		.mode		= 0666,
2281 +		.proc_handler	= way_partition_handler,
2282 +		.data		= &way_partitions[6],
2283 +		.maxlen		= sizeof(way_partitions[6]),
2284 +		.extra1		= &way_partition_min,
2285 +		.extra2		= &way_partition_max,
2286 +	},
2287 +	{
2288 +		.procname	= "C3_LB_way",
2289 +		.mode		= 0666,
2290 +		.proc_handler	= way_partition_handler,
2291 +		.data		= &way_partitions[7],
2292 +		.maxlen		= sizeof(way_partitions[7]),
2293 +		.extra1		= &way_partition_min,
2294 +		.extra2		= &way_partition_max,
2295 +	},	
2296 +	{
2297 +		.procname	= "Call_LC_way",
2298 +		.mode		= 0666,
2299 +		.proc_handler	= way_partition_handler,
2300 +		.data		= &way_partitions[8],
2301 +		.maxlen		= sizeof(way_partitions[8]),
2302 +		.extra1		= &way_partition_min,
2303 +		.extra2		= &way_partition_max,
2304 +	},		
2305 +	{
2306 +		.procname	= "lock_all",
2307 +		.mode		= 0666,
2308 +		.proc_handler	= lock_all_handler,
2309 +		.data		= &lock_all,
2310 +		.maxlen		= sizeof(lock_all),
2311 +		.extra1		= &zero,
2312 +		.extra2		= &one,
2313 +	},
2314 +	{
2315 +		.procname	= "l1_prefetch",
2316 +		.mode		= 0644,
2317 +		.proc_handler	= litmus_l1_prefetch_proc_handler,
2318 +		.data		= &l1_prefetch_proc,
2319 +		.maxlen		= sizeof(l1_prefetch_proc),
2320 +	},
2321 +	{
2322 +		.procname	= "l2_prefetch_hint",
2323 +		.mode		= 0644,
2324 +		.proc_handler	= litmus_l2_prefetch_hint_proc_handler,
2325 +		.data		= &l2_prefetch_hint_proc,
2326 +		.maxlen		= sizeof(l2_prefetch_hint_proc),
2327 +	},
2328 +	{
2329 +		.procname	= "l2_double_linefill",
2330 +		.mode		= 0644,
2331 +		.proc_handler	= litmus_l2_double_linefill_proc_handler,
2332 +		.data		= &l2_double_linefill_proc,
2333 +		.maxlen		= sizeof(l2_double_linefill_proc),
2334 +	},
2335 +	{
2336 +		.procname	= "l2_data_prefetch",
2337 +		.mode		= 0644,
2338 +		.proc_handler	= litmus_l2_data_prefetch_proc_handler,
2339 +		.data		= &l2_data_prefetch_proc,
2340 +		.maxlen		= sizeof(l2_data_prefetch_proc),
2341 +	},
2342 +	{
2343 +		.procname	= "os_isolation",
2344 +		.mode		= 0644,
2345 +		.proc_handler	= os_isolation_proc_handler,
2346 +		.data		= &os_isolation,
2347 +		.maxlen		= sizeof(os_isolation),
2348 +	},
2349 +	{
2350 +		.procname	= "use_part",
2351 +		.mode		= 0644,
2352 +		.proc_handler	= use_part_proc_handler,
2353 +		.data		= &use_part,
2354 +		.maxlen		= sizeof(use_part),
2355 +	},
2356 +	{
2357 +		.procname	= "do_perf_test",
2358 +		.mode		= 0644,
2359 +		.proc_handler	= do_perf_test_proc_handler,
2360 +	},
2361 +	{
2362 +		.procname	= "setup_flusher",
2363 +		.mode		= 0644,
2364 +		.proc_handler	= setup_flusher_proc_handler,
2365 +	},
2366 +	{
2367 +		.procname	= "lockdown_reg_0",
2368 +		.mode		= 0644,
2369 +		.proc_handler	= lockdown_reg_handler,
2370 +		.data		= &lockdown_reg[0],
2371 +		.maxlen		= sizeof(lockdown_reg[0]),
2372 +		.extra1		= &way_partition_min,
2373 +		.extra2		= &way_partition_max,
2374 +	},
2375 +	{
2376 +		.procname	= "lockdown_reg_1",
2377 +		.mode		= 0644,
2378 +		.proc_handler	= lockdown_reg_handler,
2379 +		.data		= &lockdown_reg[1],
2380 +		.maxlen		= sizeof(lockdown_reg[1]),
2381 +		.extra1		= &way_partition_min,
2382 +		.extra2		= &way_partition_max,
2383 +	},
2384 +	{
2385 +		.procname	= "lockdown_reg_2",
2386 +		.mode		= 0644,
2387 +		.proc_handler	= lockdown_reg_handler,
2388 +		.data		= &lockdown_reg[2],
2389 +		.maxlen		= sizeof(lockdown_reg[2]),
2390 +		.extra1		= &way_partition_min,
2391 +		.extra2		= &way_partition_max,
2392 +	},
2393 +	{
2394 +		.procname	= "lockdown_reg_3",
2395 +		.mode		= 0644,
2396 +		.proc_handler	= lockdown_reg_handler,
2397 +		.data		= &lockdown_reg[3],
2398 +		.maxlen		= sizeof(lockdown_reg[3]),
2399 +		.extra1		= &way_partition_min,
2400 +		.extra2		= &way_partition_max,
2401 +	},
2402 +	{
2403 +		.procname	= "lockdown_regs",
2404 +		.mode		= 0644,
2405 +		.proc_handler	= lockdown_global_handler,
2406 +		.data		= &lockdown_reg[8],
2407 +		.maxlen		= sizeof(lockdown_reg[8]),
2408 +		.extra1		= &way_partition_min,
2409 +		.extra2		= &way_partition_max,
2410 +	},
2411 +	{ }
2412 +};
2413 +
2414 +static struct ctl_table litmus_dir_table[] = {
2415 +	{
2416 +		.procname	= "litmus",
2417 + 		.mode		= 0555,
2418 +		.child		= cache_table,
2419 +	},
2420 +	{ }
2421 +};
2422 +
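     +/*
     + * Read one word of every 32-byte cache line in [start, end) while
     + * @lock_val is held in the L2 data-lockdown register, so the lines are
     + * brought into the ways left open by @lock_val.  Unlike
     + * color_read_in_mem_lock() below, the lockdown value is left in place on
     + * return; @unlock_val is currently unused.
     + */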
2423 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
2424 +{
2425 +	u32 v = 0;
2426 +
2427 +	__asm__ __volatile__ (
2428 +"	.align 5\n"
2429 +"	str	%[lockval], [%[cachereg]]\n"
2430 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2431 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2432 +"	bgt	1b\n				@ read more, if necessary\n"
2433 +	: [addr] "+r" (start),
2434 +	  [val] "+r" (v)
2435 +	: [end] "r" (end),
2436 +#ifdef CONFIG_CACHE_L2X0
2437 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2438 +#else
2439 +	  [cachereg] "r" (lockreg_d),
2440 +#endif
2441 +	  [lockval] "r" (lock_val)
2442 +	: "cc");
2443 +
2444 +	return v;
2445 +}
2446 +
2447 +
2448 +/*
2449 + * Prefetch by reading the first word of each 32-byte cache line in
2450 + * [start, end), holding @lock_val in the L2 data-lockdown register for
2451 + * the duration of the read.
2452 + *
2453 + * @lock_val: lockdown value held while the lines are read in
2454 + * @unlock_val: lockdown value written back once the read completes
2455 + * @start: start address to be prefetched
2456 + * @end: end address to prefetch (exclusive)
2457 + * Assumes: start < end
2458 + */
2459 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
2460 +{
2461 +#ifndef CONFIG_CACHE_L2X0
2462 +	unsigned long flags;
2463 +#endif
2464 +	u32 v = 0;
2465 +
2466 +#ifndef CONFIG_CACHE_L2X0
2467 +	raw_spin_lock_irqsave(&prefetch_lock, flags);
2468 +#endif
2469 +
2470 +	__asm__ __volatile__ (
2471 +"	.align 5\n"
2472 +"	str	%[lockval], [%[cachereg]]\n"
2473 +"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
2474 +"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
2475 +"	bgt	1b\n				@ read more, if necessary\n"
2476 +"	str	%[unlockval], [%[cachereg]]\n"
2477 +	: [addr] "+r" (start),
2478 +	  [val] "+r" (v)
2479 +	: [end] "r" (end),
2480 +#ifdef CONFIG_CACHE_L2X0
2481 +	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2482 +#else
2483 +	  [cachereg] "r" (lockreg_d),
2484 +#endif
2485 +	  [lockval] "r" (lock_val),
2486 +	  [unlockval] "r" (unlock_val)
2487 +	: "cc");
2488 +
2489 +#ifndef CONFIG_CACHE_L2X0
2490 +	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
2491 +#endif
2492 +
2493 +	return v;
2494 +}
2495 +
2496 +static long update_timeval(struct timespec lhs, struct timespec rhs)
2497 +{
2498 +	long val;
2499 +	struct timespec ts;
2500 +
2501 +	ts = timespec_sub(rhs, lhs);
2502 +	val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
2503 +
2504 +	return val;
2505 +}
2506 +
2507 +extern void v7_flush_kern_dcache_area(void *, size_t);
2508 +extern void v7_flush_kern_cache_all(void);
2509 +/*
2510 + * Ensure that this page is not in the L1 or L2 cache.
2511 + * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
2512 + * kernel or user vaddr.
2513 + */
2514 +void color_flush_page(void *vaddr, size_t size)
2515 +{
2516 +	v7_flush_kern_dcache_area(vaddr, size);
2517 +	//v7_flush_kern_cache_all();
2518 +}
2519 +
2520 +extern struct page* get_colored_page(unsigned long color);
2521 +
2522 +int setup_flusher_array(void)
2523 +{
2524 +	int color, way, ret = 0;
2525 +	struct page *page;
2526 +
2527 +	if (flusher_pages != NULL)
2528 +		goto out;
2529 +
2530 +	flusher_pages = (void***) kmalloc(MAX_NR_WAYS
2531 +			* sizeof(*flusher_pages), GFP_KERNEL);
2532 +	if (!flusher_pages) {
2533 +		printk(KERN_WARNING "No memory for flusher array!\n");
2534 +		ret = -EINVAL;
2535 +		goto out;
2536 +	}
2537 +
2538 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2539 +		void **flusher_color_arr;
2540 +		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
2541 +				* MAX_NR_COLORS, GFP_KERNEL);
2542 +		if (!flusher_color_arr) {
2543 +			printk(KERN_WARNING "No memory for flusher array!\n");
2544 +			ret = -ENOMEM;
2545 +			goto out_free;
2546 +		}
2547 +
2548 +		flusher_pages[way] = flusher_color_arr;
2549 +
2550 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2551 +			int node;
2552 +			switch (color) {
2553 +				case 0:
2554 +					node = 48;
2555 +					break;
2556 +				case 1:
2557 +					node = 49;
2558 +					break;
2559 +				case 2:
2560 +					node = 50;
2561 +					break;
2562 +				case 3:
2563 +					node = 51;
2564 +					break;
2565 +				case 4:
2566 +					node = 68;
2567 +					break;
2568 +				case 5:
2569 +					node = 69;
2570 +					break;
2571 +				case 6:
2572 +					node = 86;
2573 +					break;
2574 +				case 7:
2575 +					node = 87;
2576 +					break;
2577 +				case 8:
2578 +					node = 88;
2579 +					break;
2580 +				case 9:
2581 +					node = 105;
2582 +					break;
2583 +				case 10:
2584 +					node = 106;
2585 +					break;
2586 +				case 11:
2587 +					node = 107;
2588 +					break;
2589 +				case 12:
2590 +					node = 108;
2591 +					break;					
2592 +				case 13:
2593 +					node = 125;
2594 +					break;
2595 +				case 14:
2596 +					node = 126;
2597 +					break;
2598 +				case 15:
2599 +					node = 127;
2600 +					break;
2601 +			}	
2602 +			page = get_colored_page(node);
2603 +			if (!page) {
2604 +				printk(KERN_WARNING "no more colored pages\n");
2605 +				ret = -EINVAL;
2606 +				goto out_free;
2607 +			}
2608 +			flusher_pages[way][color] = page_address(page);
2609 +			if (!flusher_pages[way][color]) {
2610 +				printk(KERN_WARNING "bad page address\n");
2611 +				ret = -EINVAL;
2612 +				goto out_free;
2613 +			}
2614 +		}
2615 +	}
2616 +out:
2617 +	return ret;
2618 +out_free:
2619 +	for (way = 0; way < MAX_NR_WAYS; way++) {
2620 +		for (color = 0; color < MAX_NR_COLORS; color++) {
2621 +			/* not bothering to try and give back colored pages */
2622 +		}
2623 +		kfree(flusher_pages[way]);
2624 +	}
2625 +	kfree(flusher_pages);
2626 +	flusher_pages = NULL;
2627 +	return ret;
2628 +}
2629 +
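     +/*
     + * Evict cached data by reading one colored page for every (way, color)
     + * pair through the flusher_pages array, restricting allocation to the
     + * way being flushed via unlocked_way[].  With @all == 0, ways the
     + * current CPU already has locked down (bits set in its data-lockdown
     + * register) are skipped.  The CPU's previous lockdown state is restored
     + * before returning.  Assumes setup_flusher_array() has populated
     + * flusher_pages.
     + */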
2630 +void flush_cache(int all)
2631 +{
2632 +	int way, color, cpu;
2633 +	unsigned long flags;
2634 +	
2635 +	raw_spin_lock_irqsave(&cache_lock, flags);
2636 +	cpu = raw_smp_processor_id();
2637 +	
2638 +	prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2639 +	prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2640 +	for (way=0;way<MAX_NR_WAYS;way++) {
2641 +		if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
2642 +			!all)
2643 +			continue;
2644 +		for (color=0;color<MAX_NR_COLORS;color++) {
2645 +			void *vaddr = flusher_pages[way][color];
2646 +			u32 lvalue  = unlocked_way[way];
2647 +			color_read_in_mem_lock(lvalue, LOCK_ALL,
2648 +					       vaddr, vaddr + PAGE_SIZE);
2649 +		}
2650 +
2651 +	}
2652 +
2653 +	writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
2654 +	writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
2655 +	raw_spin_unlock_irqrestore(&cache_lock, flags);
2656 +}
2657 +
2658 +/* src = shared, dst = local */
2659 +#if 1 // random
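     +/*
     + * Micro-benchmark behind the run_test() system call (random-access
     + * variant; a sequential variant follows in the #else branch).  The loop
     + * follows a random pointer chain stored in @src (src[next].line[0] holds
     + * the next index) and copies each visited cache line from @dst into
     + * @src.  With @type == 1 both buffers are first prefetched into locked
     + * L2 ways, so the copy runs out of the cache; otherwise only @dst is
     + * prefetched and @src is accessed from memory.  The elapsed time,
     + * measured with litmus_clock() and interrupts disabled, is returned to
     + * userspace through @ts.
     + */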
2660 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2661 +{
2662 +	/* size is in KB */
2663 +	long ret = 0;
2664 +	lt_t t1, t2;
2665 +	int numlines = size * CACHELINES_IN_1KB;
2666 +	int next, sum = 0, ran;
2667 +	unsigned long flags;
2668 +	
2669 +	get_random_bytes(&ran, sizeof(int));
2670 +	next = ran % ((size*1024)/sizeof(cacheline_t));
2671 +	
2672 +	//preempt_disable();
2673 +	if (type == 1) {
2674 +		int i, j;
2675 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2676 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2677 +		
2678 +		local_irq_save(flags);
2679 +		t1 = litmus_clock();
2680 +		for (i = 0; i < numlines; i++) {
2681 +			next = src[next].line[0];
2682 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2683 +				//dst[next].line[j] = src[next].line[j]; // read
2684 +				src[next].line[j] = dst[next].line[j]; // write
2685 +			}			
2686 +		}
2687 +		t2 = litmus_clock();
2688 +		local_irq_restore(flags);
2689 +		sum = next + (int)t2;
2690 +		t2 -= t1;
2691 +		ret = put_user(t2, ts);
2692 +	}
2693 +	else {
2694 +		int i, j;
2695 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2696 +		local_irq_save(flags);
2697 +		t1 = litmus_clock();
2698 +		for (i = 0; i < numlines; i++) {
2699 +			next = src[next].line[0];
2700 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2701 +				//dst[next].line[j] = src[next].line[j]; //read
2702 +				src[next].line[j] = dst[next].line[j]; //write
2703 +			}			
2704 +		}
2705 +		t2 = litmus_clock();
2706 +		local_irq_restore(flags);
2707 +		sum = next + (int)t2;
2708 +		t2 -= t1;
2709 +		ret = put_user(t2, ts);
2710 +		v7_flush_kern_dcache_area(src, size*1024);
2711 +	}
2712 +	//preempt_enable();
2713 +	flush_cache(1);
2714 +
2715 +	return ret;
2716 +}
2717 +#else
2718 +// sequential
2719 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
2720 +{
2721 +	/* size is in KB */
2722 +	long ret = 0;
2723 +	lt_t t1, t2;
2724 +	int numlines = size * CACHELINES_IN_1KB;
2725 +	int sum = 0;
2726 +	unsigned long flags;
2727 +	
2728 +	//preempt_disable();
2729 +	if (type == 1) {
2730 +		int i, j;
2731 +		color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
2732 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2733 +		
2734 +		local_irq_save(flags);
2735 +		t1 = litmus_clock();
2736 +		for (i = 0; i < numlines; i++) {
2737 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2738 +				//dst[i].line[j] = src[i].line[j]; // read
2739 +				src[i].line[j] = dst[i].line[j]; // write
2740 +			}			
2741 +		}
2742 +		t2 = litmus_clock();
2743 +		local_irq_restore(flags);
2744 +		sum = (int)(t1 + t2);
2745 +		t2 -= t1;
2746 +		ret = put_user(t2, ts);
2747 +	}
2748 +	else {
2749 +		int i, j;
2750 +		color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
2751 +		local_irq_save(flags);
2752 +		t1 = litmus_clock();
2753 +		for (i = 0; i < numlines; i++) {
2754 +			for (j = 0; j < INTS_IN_CACHELINE; j++) {
2755 +				//dst[i].line[j] = src[i].line[j]; //read
2756 +				src[i].line[j] = dst[i].line[j]; //write
2757 +			}			
2758 +		}
2759 +		t2 = litmus_clock();
2760 +		local_irq_restore(flags);
2761 +		sum = (int)(t1 + t2);
2762 +		t2 -= t1;
2763 +		ret = put_user(t2, ts);
2764 +		v7_flush_kern_dcache_area(src, size*1024);
2765 +	}
2766 +	//preempt_enable();
2767 +	flush_cache(1);
2768 +
2769 +	return ret;
2770 +}
2771 +#endif
2772 +
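     +/*
     + * Prefetch the user buffer at @vaddr (@size bytes) into the L2 ways
     + * selected by @lock_way, then leave the lockdown register set so that
     + * subsequent allocations may only use the ways in @unlock_way.  Both
     + * arguments are way bitmasks; they are complemented before being
     + * written, since a set lockdown bit disables allocation in that way.
     + */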
2773 +asmlinkage long sys_lock_buffer(void* vaddr, size_t size, u32 lock_way, u32 unlock_way)
2774 +{
2775 +	/* size is in bytes */
2776 +	long ret = 0;
2777 +	int i;
2778 +	u32 lock_val, unlock_val;
2779 +	
2780 +	lock_val = ~lock_way & 0x0000ffff;
2781 +	unlock_val = ~unlock_way & 0x0000ffff;
2782 +	color_read_in_mem_lock(lock_val, unlock_val, (void*)vaddr, (void*)vaddr + size);
2783 +	
2784 +	return ret;
2785 +}
2786 +
2787 +#define TRIALS 1000
2788 +
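     +/*
     + * Timing experiment triggered by writing to /proc/sys/litmus/do_perf_test:
     + * allocates a 2^order-page buffer and reports, via printk, the average
     + * cost over TRIALS iterations of (i) flushing the buffer, (ii) re-reading
     + * it while cached, (iii) re-reading it after a flush (i.e. from memory),
     + * and (iv) re-reading it after it has been written in a locked way.
     + */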
2789 +static int perf_test(void) {
2790 +	struct timespec before, after;
2791 +	struct page *page;
2792 +	void *vaddr;
2793 +	u32 *data;
2794 +	long time, flush_time;
2795 +	int i, num_pages = 1;
2796 +	unsigned int order = 4;
2797 +
2798 +	for (i = 0; i < order; i++) {
2799 +		num_pages = num_pages*2;
2800 +	}
2801 +
2802 +	printk("Number of pages: %d\n", num_pages);
2803 +	//page = alloc_page(__GFP_MOVABLE);
2804 +	page = alloc_pages(__GFP_MOVABLE, order);
2805 +	if (!page) {
2806 +		printk(KERN_WARNING "No memory\n");
2807 +		return -ENOMEM;
2808 +	}
2809 +
2810 +	vaddr = page_address(page);
2811 +	if (!vaddr)
2812 +		printk(KERN_WARNING "%s: vaddr is null\n", __FUNCTION__);
2813 +	data = (u32*) vaddr;
2814 +
2815 +	getnstimeofday(&before);
2816 +	barrier();
2817 +	for (i = 0; i < TRIALS; i++) {
2818 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2819 +	}
2820 +	barrier();
2821 +	getnstimeofday(&after);
2822 +	time = update_timeval(before, after);
2823 +	printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
2824 +	flush_time = time / TRIALS;
2825 +
2826 +	color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2827 +	
2828 +	barrier();
2829 +	getnstimeofday(&before);
2830 +	barrier();
2831 +	for (i = 0; i < TRIALS; i++) {
2832 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2833 +	}
2834 +	barrier();
2835 +	getnstimeofday(&after);
2836 +	time = update_timeval(before, after);
2837 +	printk("Average for read from cache: %ld\n", time / TRIALS);
2838 +
2839 +	getnstimeofday(&before);
2840 +	barrier();
2841 +	for (i = 0; i < TRIALS; i++) {
2842 +		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2843 +		color_flush_page(vaddr, PAGE_SIZE*num_pages);
2844 +	}
2845 +	barrier();
2846 +	getnstimeofday(&after);
2847 +	time = update_timeval(before, after);
2848 +	printk("Average for read from mem: %ld (%ld)\n", time / TRIALS - flush_time, time / TRIALS);
2849 +
2850 +	// write in locked way
2851 +	color_read_in_mem_lock(nr_unlocked_way[2], LOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2852 +	for (i = 0; i < PAGE_SIZE*num_pages/sizeof(u32); i++) {
2853 +		data[i] = i%63353;
2854 +	}
2855 +	// read
2856 +	barrier();
2857 +	getnstimeofday(&before);
2858 +	barrier();
2859 +	for (i = 0; i < TRIALS; i++) {
2860 +		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
2861 +	}
2862 +	barrier();
2863 +	getnstimeofday(&after);
2864 +	time = update_timeval(before, after);
2865 +	printk("Average for read in after write: %ld\n", time / TRIALS);
2866 +	
2867 +	
2868 +	//free_page((unsigned long)vaddr);
2869 +	free_pages((unsigned long)vaddr, order);
2870 +
2871 +	return 0;
2872 +}
2873 +
2874 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2875 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2876 +{
2877 +	int ret = 0;
2878 +
2879 +	if (write) {
2880 +		ret = perf_test();
2881 +	}
2882 +
2883 +	return ret;
2884 +}
2885 +
2886 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2887 +		void __user *buffer, size_t *lenp, loff_t *ppos)
2888 +{
2889 +	int ret = -EINVAL;
2890 +
2891 +	if (write && flusher_pages == NULL) {
2892 +		ret = setup_flusher_array();
2893 +		printk(KERN_INFO "setup flusher return: %d\n", ret);
2894 +	
2895 +	}
2896 +	else if (flusher_pages) {
2897 +		printk(KERN_INFO "flusher_pages is already set!\n");
2898 +		ret = 0;
2899 +	}
2900 +	
2901 +	return ret;
2902 +}
2903 +
2904 +static struct ctl_table_header *litmus_sysctls;
2905 +
2906 +static int __init litmus_sysctl_init(void)
2907 +{
2908 +	int ret = 0;
2909 +
2910 +	printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n");
2911 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
2912 +	if (!litmus_sysctls) {
2913 +		printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n");
2914 +		ret = -EFAULT;
2915 +		goto out;
2916 +	}
2917 +
2918 +	way_partition_min = 0x00000000;
2919 +	way_partition_max = 0x0000FFFF;
2920 +	
2921 +out:
2922 +	return ret;
2923 +}
2924 +
2925 +module_init(litmus_sysctl_init);
2926 diff --git litmus/color_shm.c litmus/color_shm.c
2927 new file mode 100644
2928 index 0000000..d4913cd
2929 --- /dev/null
2930 +++ litmus/color_shm.c
2931 @@ -0,0 +1,402 @@
2932 +#include <linux/sched.h>
2933 +#include <linux/mm.h>
2934 +#include <linux/fs.h>
2935 +#include <linux/miscdevice.h>
2936 +#include <linux/spinlock.h>
2937 +#include <linux/module.h>
2938 +#include <linux/highmem.h>
2939 +#include <linux/slab.h>
2940 +#include <linux/mutex.h>
2941 +#include <asm/uaccess.h>
2942 +
2943 +#include <litmus/litmus.h>
2944 +
2945 +#define DEV_NAME	"litmus/color_shm"
2946 +
2947 +/* Major number assigned to our device.
2948 + * Refer to Documentation/devices.txt */
2949 +#define SHM_MAJOR			240
2950 +#define MAX_COLORED_PAGE	256
2951 +#define NUM_BANKS			8
2952 +#define NUM_COLORS			16
2953 +
2954 +static struct mutex dev_lock;
2955 +static int bypass_cache;
2956 +
2957 +struct color_ioctl_cmd {
2958 +	unsigned int color;
2959 +	unsigned int bank;
2960 +};
2961 +
2962 +struct color_ioctl_offset {
2963 +	unsigned long offset;
2964 +	int lock;
2965 +};
2966 +
2967 +#define SET_COLOR_SHM_CMD		_IOW(SHM_MAJOR, 0x1, struct color_ioctl_cmd)
2968 +#define SET_COLOR_SHM_OFFSET	_IOW(SHM_MAJOR, 0x2, struct color_ioctl_offset)
2969 +
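     +/*
     + * Expected userspace usage (sketch; the exact device path depends on how
     + * the misc device is exposed, typically /dev/litmus/color_shm):
     + *
     + *   fd = open("/dev/litmus/color_shm", O_RDWR);
     + *   struct color_ioctl_cmd cmd = { .color = 0x0003, .bank = 0x01 };
     + *   ioctl(fd, SET_COLOR_SHM_CMD, &cmd);
     + *   struct color_ioctl_offset off = { .offset = 0, .lock = 0 };
     + *   ioctl(fd, SET_COLOR_SHM_OFFSET, &off);
     + *   buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
     + *
     + * mmap() rejects requests until both ioctls have been issued (see
     + * litmus_color_shm_mmap() below); the mapped pages are then drawn
     + * round-robin from the banks and colors whose bits are set in the masks.
     + */
     +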
2970 +struct color_ioctl_cmd color_param;
2971 +struct color_ioctl_offset color_offset;
2972 +
2973 +static int mmap_common_checks(struct vm_area_struct *vma)
2974 +{
2975 +	/* you can only map the "first" page */
2976 +	if (vma->vm_pgoff != 0)
2977 +		return -EINVAL;
2978 +
2979 +	return 0;
2980 +}
2981 +
2982 +static void mmap_common_vma_flags(struct vm_area_struct *vma)
2983 +{
2984 +	/* This mapping should not be kept across forks,
2985 +	 * cannot be expanded, and is not a "normal" page. */
2986 +	//vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_SHARED | VM_MAYSHARE;
2987 +	vma->vm_flags |= VM_SHARED | VM_MAYSHARE | VM_LOCKED;
2988 +
2989 +	/* We don't want the first write access to trigger a "minor" page fault
2990 +	 * to mark the page as dirty.  This is transient, private memory; we
2991 +	 * don't care if it was touched or not. __S011 means RW access, but not
2992 +	 * execute, and avoids copy-on-write behavior.
2993 +	 * See protection_map in mmap.c.  */
2994 +	vma->vm_page_prot = PAGE_SHARED;
2995 +}
2996 +
2997 +#define vma_nr_pages(vma) \
2998 +	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
2999 +
3000 +extern struct page* get_colored_page(unsigned long color);
3001 +
3002 +static int do_map_colored_page(struct vm_area_struct *vma,
3003 +		const unsigned long addr,
3004 +		const unsigned long color_no)
3005 +{
3006 +	int err = 0;
3007 +	unsigned long offset = 2048;
3008 +	
3009 +	struct page *page = get_colored_page(color_no);
3010 +
3011 +	if (!page) {
3012 +		printk(KERN_INFO "Could not get page with color %lu.\n",
3013 +				color_no);
3014 +		err = -ENOMEM;
3015 +		goto out;
3016 +	}
3017 +
3018 +	printk(KERN_INFO "vma: %p  addr: 0x%lx  color_no: %lu\n",
3019 +			vma, addr, color_no);
3020 +	
3021 +	printk(KERN_INFO "vm_start: %lu vm_end: %lu\n",
3022 +			vma->vm_start, vma->vm_end);
3023 +
3024 +	printk(KERN_INFO "inserting page (pa: 0x%lx) at vaddr: 0x%lx  "
3025 +			"flags: 0x%lx  prot: 0x%lx\n",
3026 +			page_to_phys(page), addr,
3027 +			vma->vm_flags, pgprot_val(vma->vm_page_prot));
3028 +
3029 +	
3030 +	err = vm_insert_page(vma, addr, page);
3031 +	if (err) {
3032 +		printk(KERN_INFO "vm_insert_page() failed (%d)\n", err);
3033 +		err = -EINVAL;
3034 +		goto out;
3035 +	}
3036 +out:
3037 +	return err;
3038 +}
3039 +	
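     +/*
     + * Back every page of the VMA with a freshly allocated colored page,
     + * cycling round-robin over the banks and colors whose bits are set in
     + * color_param; the pool index passed to get_colored_page() is
     + * bank * NUM_COLORS + color.  When shm_bypass is enabled, the mapping is
     + * made non-cacheable first.
     + */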
3040 +static int do_map_colored_pages(struct vm_area_struct *vma)
3041 +{
3042 +	const unsigned long nr_pages = vma_nr_pages(vma);
3043 +	unsigned long nr_mapped;
3044 +	int i, start_bank = -1, start_color = -1;
3045 +	int cur_bank = -1, cur_color = -1, err = 0;
3046 +	int colors[16] = {0}, banks[8] = {0};
3047 +
3048 +	if (bypass_cache == 1)
3049 +		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3050 +	
3051 +	for (i = 0; i < NUM_BANKS; i++) {
3052 +		if (((color_param.bank >> i)&0x1) == 1)
3053 +			banks[i] = 1;
3054 +	}
3055 +	
3056 +	for (i = 0; i < NUM_COLORS; i++) {
3057 +		if (((color_param.color >> i)&0x1) == 1)
3058 +			colors[i] = 1;
3059 +	}
3060 +	
3061 +	for (i = 0; i < NUM_BANKS; i++) {
3062 +		if (banks[i] == 1) {
3063 +			start_bank = i;
3064 +			break;
3065 +		}
3066 +	}
3067 +	for (i = 0; i < NUM_COLORS; i++) {
3068 +		if (colors[i] == 1) {
3069 +			start_color = i;
3070 +			break;
3071 +		}
3072 +	}	
3073 +		
3074 +	cur_bank = start_bank;
3075 +	cur_color = start_color;
3076 +	
3077 +	for (i = 0; i < NUM_BANKS; i++) {
3078 +		printk(KERN_INFO "BANK[%d] = %d\n", i, banks[i]);
3079 +	}
3080 +	printk(KERN_INFO "cur_bank = %d\n", cur_bank);
3081 +	for (i = 0; i < NUM_COLORS; i++) {
3082 +		printk(KERN_INFO "COLOR[%d] = %d\n", i, colors[i]);
3083 +	}
3084 +	printk(KERN_INFO "cur_color = %d\n", cur_color);
3085 +	
3086 +	
3087 +	TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
3088 +			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
3089 +	
3090 +	for (nr_mapped = 0; nr_mapped < nr_pages; nr_mapped++) {
3091 +		const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
3092 +		const unsigned long color_no = cur_bank*NUM_COLORS + cur_color;
3093 +		
3094 +		err = do_map_colored_page(vma, addr, color_no);
3095 +		printk(KERN_INFO "mapped bank[%d], color[%d], color_no = %lu at 0x%lx\n", 
3096 +			cur_bank, cur_color, color_no, addr);
3097 +		if (err) {
3098 +			TRACE_CUR("Could not map colored page set.\n");
3099 +			err = -EINVAL;
3100 +			goto out;
3101 +		}
3102 +		do {
3103 +			cur_color++;
3104 +		} while(colors[cur_color] == 0);
3105 +		
3106 +		if (cur_color >= NUM_COLORS) {
3107 +			do {
3108 +				cur_bank++;
3109 +			} while(banks[cur_bank] == 0);
3110 +			cur_color = start_color;
3111 +		}
3112 +		
3113 +		if (cur_bank >= NUM_BANKS) {
3114 +			cur_bank = start_bank;
3115 +		}			
3116 +	}
3117 +	TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
3118 + out:
3119 +	return err;
3120 +}
3121 +
3122 +static int map_colored_pages(struct vm_area_struct *vma)
3123 +{
3124 +	int err = 0;
3125 +
3126 +	printk(KERN_INFO "User requests %lu pages.\n", vma_nr_pages(vma));
3127 +	if (MAX_COLORED_PAGE < vma_nr_pages(vma)) {
3128 +		TRACE_CUR("Max page request %d but want %lu.\n",
3129 +				MAX_COLORED_PAGE, vma_nr_pages(vma));
3130 +		err = -EINVAL;
3131 +		goto out;
3132 +	}
3133 +	err = do_map_colored_pages(vma);
3134 +out:
3135 +	return err;
3136 +}
3137 +
3138 +static void litmus_color_shm_vm_close(struct vm_area_struct *vma)
3139 +{
3140 +
3141 +	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
3142 +			vma->vm_flags, pgprot_val(vma->vm_page_prot));
3143 +
3144 +	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
3145 +			(void*) vma->vm_start, (void*) vma->vm_end, vma,
3146 +			vma->vm_private_data);
3147 +
3148 +}
3149 +
3150 +static int litmus_color_shm_vm_fault(struct vm_area_struct *vma,
3151 +		struct vm_fault *vmf)
3152 +{
3153 +	/* This function should never be called, since
3154 +	 * all pages should have been mapped by mmap()
3155 +	 * already. */
3156 +	TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
3157 +	printk(KERN_INFO "flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
3158 +
3159 +	printk(KERN_INFO "Page fault in color ctrl page! prot=0x%lx\n", pgprot_val(vma->vm_page_prot));
3160 +
3161 +	return VM_FAULT_SIGBUS;
3162 +}
3163 +
3164 +static struct vm_operations_struct litmus_color_shm_vm_ops = {
3165 +	.close	= litmus_color_shm_vm_close,
3166 +	.fault	= litmus_color_shm_vm_fault,
3167 +};
3168 +
3169 +static int litmus_color_shm_mmap(struct file *filp, struct vm_area_struct *vma)
3170 +{
3171 +	int err = 0;
3172 +
3173 +	printk(KERN_INFO "mmap called\n");
3174 +	
3175 +	if (color_param.color == 0x00000000 || color_param.bank == 0x00000000) {
3176 +		printk(KERN_INFO "color_info not set.\n");
3177 +		return -EINVAL;
3178 +	}
3179 +	if (color_offset.offset == 0xffffffff || color_offset.lock == -1) {
3180 +		printk(KERN_INFO "color_offset not set.\n");
3181 +		return -EINVAL;
3182 +	}
3183 +	
3184 +	err = mmap_common_checks(vma);
3185 +	if (err) {
3186 +		TRACE_CUR("failed mmap common checks\n");
3187 +		goto out;
3188 +	}
3189 +
3190 +	vma->vm_ops = &litmus_color_shm_vm_ops;
3191 +	mmap_common_vma_flags(vma);
3192 +
3193 +	err = map_colored_pages(vma);
3194 +
3195 +	TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
3196 +			pgprot_val(vma->vm_page_prot));
3197 +out:
3198 +	color_param.color = 0x00000000;
3199 +	color_param.bank = 0x00000000;
3200 +	color_offset.offset = 0xffffffff;
3201 +	color_offset.lock = -1;
3202 +	
3203 +	return err;
3204 +
3205 +}
3206 +
3207 +static long litmus_color_shm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3208 +{
3209 +	long err = -ENOIOCTLCMD;
3210 +	struct color_ioctl_cmd color_info;
3211 +	struct color_ioctl_offset color_off;
3212 +				
3213 +	printk(KERN_INFO "color_shm ioctl\n");
3214 +	
3215 +	if (_IOC_TYPE(cmd) != SHM_MAJOR)
3216 +		return -ENOTTY;
3217 +	
3218 +	
3219 +	switch (cmd) {
3220 +		case SET_COLOR_SHM_CMD:
3221 +			err = copy_from_user(&color_info, (void*)arg, sizeof(struct color_ioctl_cmd));
3222 +			if (err)
3223 +				return -EFAULT;
3224 +			color_param.color = color_info.color;
3225 +			color_param.bank = color_info.bank;
3226 +			printk(KERN_INFO "COLOR = %x\n", color_param.color);
3227 +			printk(KERN_INFO "BANK  = %x\n", color_param.bank);
3228 +			err = 0;
3229 +			break;
3230 +		case SET_COLOR_SHM_OFFSET:
3231 +			err = copy_from_user(&color_off, (void*)arg, sizeof(struct color_ioctl_offset));
3232 +			if (err)
3233 +				return -EFAULT;
3234 +			color_offset.offset = color_off.offset;
3235 +			color_offset.lock = color_off.lock;
3236 +			printk(KERN_INFO "OFFSET = %lx\n", color_offset.offset);
3237 +			printk(KERN_INFO "LOCK   = %d\n", color_offset.lock);
3238 +			break;
3239 +			
3240 +		default:
3241 +			printk(KERN_INFO "Invalid IOCTL CMD\n");
3242 +			err = -EINVAL;
3243 +	}
3244 +
3245 +	return err;
3246 +}
3247 +
3248 +static struct file_operations litmus_color_shm_fops = {
3249 +	.owner	= THIS_MODULE,
3250 +	.mmap	= litmus_color_shm_mmap,
3251 +	.unlocked_ioctl	= litmus_color_shm_ioctl,
3252 +};
3253 +
3254 +static struct miscdevice litmus_color_shm_dev = {
3255 +	.name	= DEV_NAME,
3256 +	.minor	= MISC_DYNAMIC_MINOR,
3257 +	.fops	= &litmus_color_shm_fops,
3258 +};
3259 +
3260 +struct mutex bypass_mutex;
3261 +
3262 +int bypass_proc_handler(struct ctl_table *table, int write,
3263 +		void __user *buffer, size_t *lenp, loff_t *ppos)
3264 +{
3265 +	int ret;
3266 +
3267 +	mutex_lock(&bypass_mutex);
3268 +	ret = proc_dointvec(table, write, buffer, lenp, ppos);
3269 +	printk(KERN_INFO "shm_bypass = %d\n", bypass_cache);
3270 +	mutex_unlock(&bypass_mutex);
3271 +	
3272 +	return ret;
3273 +}
3274 +
3275 +static int zero = 0;
3276 +static int one = 1;
3277 +
3278 +static struct ctl_table cache_table[] =
3279 +{
3280 +	{
3281 +		.procname	= "shm_bypass",
3282 +		.mode		= 0666,
3283 +		.proc_handler	= bypass_proc_handler,
3284 +		.data		= &bypass_cache,
3285 +		.maxlen		= sizeof(bypass_cache),
3286 +		.extra1		= &zero,
3287 +		.extra2		= &one,
3288 +	},	
3289 +	{ }
3290 +};
3291 +
3292 +static struct ctl_table litmus_dir_table[] = {
3293 +	{
3294 +		.procname	= "litmus",
3295 + 		.mode		= 0555,
3296 +		.child		= cache_table,
3297 +	},
3298 +	{ }
3299 +};
3300 +
3301 +static struct ctl_table_header *litmus_sysctls;
3302 +
3303 +static int __init init_color_shm_devices(void)
3304 +{
3305 +	int err;
3306 +
3307 +	printk(KERN_INFO "Registering LITMUS^RT color_shm devices.\n");
3308 +	litmus_sysctls = register_sysctl_table(litmus_dir_table);
3309 +	if (!litmus_sysctls) {
3310 +		printk(KERN_WARNING "Could not register LITMUS^RT color_shm sysctl.\n");
3311 +		err = -EFAULT;
3312 +	}
3313 +	
3314 +	mutex_init(&dev_lock);
3315 +	mutex_init(&bypass_mutex);
3316 +	color_param.color = 0x00000000;
3317 +	color_param.bank = 0x00000000;
3318 +	color_offset.offset = 0xffffffff;
3319 +	color_offset.lock = -1;
3320 +	bypass_cache = 0;
3321 +	err = misc_register(&litmus_color_shm_dev);
3322 +	
3323 +	return err;
3324 +}
3325 +
3326 +static void __exit exit_color_shm_devices(void)
3327 +{
3328 +	misc_deregister(&litmus_color_shm_dev);
3329 +	printk(KERN_INFO "Unregistering %s device.\n", DEV_NAME);
3330 +}
3331 +
3332 +module_init(init_color_shm_devices);
3333 +module_exit(exit_color_shm_devices);
3334 \ No newline at end of file
3335 diff --git litmus/litmus.c litmus/litmus.c
3336 index db5ce0e9..ddb80e1 100644
3337 --- litmus/litmus.c
3338 +++ litmus/litmus.c
3339 @@ -14,6 +14,10 @@
3340  #include <linux/sched/rt.h>
3341  #include <linux/rwsem.h>
3342  #include <linux/interrupt.h>
3343 +#include <linux/migrate.h>
3344 +#include <linux/mm.h>
3345 +#include <linux/memcontrol.h>
3346 +#include <linux/mm_inline.h>
3347  
3348  #include <litmus/litmus.h>
3349  #include <litmus/bheap.h>
3350 @@ -21,6 +25,8 @@
3351  #include <litmus/rt_domain.h>
3352  #include <litmus/litmus_proc.h>
3353  #include <litmus/sched_trace.h>
3354 +#include <litmus/cache_proc.h>
3355 +#include <litmus/mc2_common.h>
3356  
3357  #ifdef CONFIG_SCHED_CPU_AFFINITY
3358  #include <litmus/affinity.h>
3359 @@ -31,6 +37,8 @@
3360  #include <trace/events/litmus.h>
3361  #endif
3362  
3363 +extern void l2c310_flush_all(void);
3364 +
3365  /* Number of RT tasks that exist in the system */
3366  atomic_t rt_task_count 		= ATOMIC_INIT(0);
3367  
3368 @@ -160,6 +168,14 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
3369  		       pid, tp.budget_policy);
3370  		goto out_unlock;
3371  	}
3372 +#ifdef CONFIG_PGMRT_SUPPORT
3373 +	if (tp.pgm_type < PGM_NOT_A_NODE || tp.pgm_type > PGM_INTERNAL) {
3374 +		printk(KERN_INFO "litmus: real-time task %d rejected "
3375 +				"because of unknown PGM node type specified (%d)\n",
3376 +				pid, tp.pgm_type);
3377 +		goto out_unlock;
3378 +	}
3379 +#endif
3380  
3381  	target->rt_param.task_params = tp;
3382  
3383 @@ -314,6 +330,209 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
3384  	return ret;
3385  }
3386  
3387 +asmlinkage long sys_reservation_create(int type, void __user *config)
3388 +{
3389 +    return litmus->reservation_create(type, config);
3390 +}
3391 +
3392 +asmlinkage long sys_reservation_destroy(unsigned int reservation_id, int cpu)
3393 +{
3394 +    return litmus->reservation_destroy(reservation_id, cpu);
3395 +}
3396 +
3397 +static unsigned long color_mask;
3398 +
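     +/*
     + * Page color = the physical-address bits that select the L2 set above
     + * the page offset.  With the values configured in _init_litmus() below
     + * (2048 sets x 32-byte lines = 64KB per way, 4KB pages), color_mask
     + * works out to 0xf000, i.e. bits 12-15, giving 16 page colors.
     + */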
3399 +static inline unsigned long page_color(struct page *page)
3400 +{
3401 +    return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT);
3402 +}
3403 +
3404 +extern int isolate_lru_page(struct page *page);
3405 +extern void putback_movable_page(struct page *page);
3406 +extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
3407 +
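     +/*
     + * Migrate every page currently mapped by the calling task to pages
     + * allocated by new_alloc_page() for the pool given by @cpu: node is
     + * cpu*2 + the task's criticality level (1 for non-real-time tasks), or
     + * 8 when @cpu == -1.  Reserved pages and pages that cannot be isolated
     + * from the LRU are left in place and counted in nr_failed.  The cache
     + * is flushed afterwards.
     + */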
3408 +asmlinkage long sys_set_page_color(int cpu)
3409 +{
3410 +	long ret = 0;
3411 +	//struct page *page_itr = NULL;
3412 +	struct vm_area_struct *vma_itr = NULL;
3413 +	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0;
3414 +	unsigned long node;
3415 +	enum crit_level lv;
3416 +	struct mm_struct *mm;
3417 +		
3418 +	LIST_HEAD(pagelist);
3419 +	LIST_HEAD(shared_pagelist);
3420 +	
3421 +	migrate_prep();
3422 +	
3423 +	rcu_read_lock();
3424 +	get_task_struct(current);
3425 +	rcu_read_unlock();
3426 +	mm = get_task_mm(current);
3427 +	put_task_struct(current);
3428 +
3429 +	//down_read(&current->mm->mmap_sem);
3430 +	down_read(&mm->mmap_sem);
3431 +	TRACE_TASK(current, "SYSCALL set_page_color\n");
3432 +	vma_itr = mm->mmap;
3433 +	while (vma_itr != NULL) {
3434 +		unsigned int num_pages = 0, i;
3435 +		struct page *old_page = NULL;
3436 +		
3437 +		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
3438 +		// print vma flags
3439 +		//printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
3440 +		//printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
3441 +		//printk(KERN_INFO "vm_page_prot: 0x%lx\n", vma_itr->vm_page_prot);
3442 +		for (i = 0; i < num_pages; i++) {
3443 +			old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
3444 +			
3445 +			if (IS_ERR(old_page))
3446 +				continue;
3447 +			if (!old_page)
3448 +				continue;
3449 +
3450 +			if (PageReserved(old_page)) {
3451 +				TRACE("Reserved Page!\n");
3452 +				put_page(old_page);
3453 +				continue;
3454 +			}
3455 +			
3456 +			TRACE_TASK(current, "addr: %08x, pfn: %x, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
3457 +			
3458 +			//if (page_mapcount(old_page) == 1) {
3459 +				ret = isolate_lru_page(old_page);
3460 +				if (!ret) {
3461 +					list_add_tail(&old_page->lru, &pagelist);
3462 +					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
3463 +					nr_pages++;
3464 +				} else {
3465 +					TRACE_TASK(current, "isolate_lru_page failed\n");
3466 +					TRACE_TASK(current, "page_lru = %d PageLRU = %d\n", page_lru(old_page), PageLRU(old_page));
3467 +					nr_failed++;
3468 +				}
3469 +				//printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
3470 +				put_page(old_page);
3471 +			//}
3472 +			/*
3473 +			else {
3474 +				nr_shared_pages++;
3475 +				//printk(KERN_INFO "SHARED _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
3476 +				put_page(old_page);
3477 +			}
3478 +			*/
3479 +		}
3480 +		
3481 +		vma_itr = vma_itr->vm_next;
3482 +	}
3483 +
3484 +	//list_for_each_entry(page_itr, &pagelist, lru) {
3485 +//		printk(KERN_INFO "B _mapcount = %d, _count = %d\n", page_mapcount(page_itr), page_count(page_itr));
3486 +//	}
3487 +	
3488 +	ret = 0;
3489 +	if (!is_realtime(current))
3490 +		lv = 1;
3491 +	else {
3492 +		lv = tsk_rt(current)->mc2_data->crit;
3493 +	}
3494 +	
3495 +	if (cpu == -1)
3496 +		node = 8;
3497 +	else
3498 +		node = cpu*2 + lv;
3499 +		
3500 +	if (!list_empty(&pagelist)) {
3501 +		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
3502 +		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
3503 +		printk(KERN_INFO "%ld pages not migrated.\n", ret);
3504 +		nr_not_migrated = ret;
3505 +		if (ret) {
3506 +			putback_movable_pages(&pagelist);
3507 +		}
3508 +	}
3509 +	
3510 +	/* handle sigpage and litmus ctrl_page */
3511 +/*	vma_itr = current->mm->mmap;
3512 +	while (vma_itr != NULL) {
3513 +		if (vma_itr->vm_start == tsk_rt(current)->addr_ctrl_page) {
3514 +			TRACE("litmus ctrl_page = %08x\n", vma_itr->vm_start);
3515 +			vma_itr->vm_page_prot = PAGE_SHARED;
3516 +			break;
3517 +		}
3518 +		vma_itr = vma_itr->vm_next;
3519 +	}
3520 +*/
3521 +	up_read(&mm->mmap_sem);
3522 +
3523 +/*	
3524 +	list_for_each_entry(page_itr, &shared_pagelist, lru) {
3525 +		TRACE("S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr));
3526 +	}
3527 +*/	
3528 +	TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
3529 +	printk(KERN_INFO "node = %ld, nr_migrated_pages = %d, nr_shared_pages = %d, nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_failed-2, nr_failed);
3530 +	//printk(KERN_INFO "node = %d\n", cpu_to_node(smp_processor_id()));
3531 +	flush_cache(1);
3532 +	
3533 +	return ret;
3534 +}
3535 +
3536 +/* sys_test_call() is a test system call used during development */
3537 +asmlinkage long sys_test_call(unsigned int param)
3538 +{
3539 +	long ret = 0;
3540 +	unsigned long flags;
3541 +	struct vm_area_struct *vma_itr = NULL;
3542 +	
3543 +	TRACE_CUR("test_call param = %d\n", param);
3544 +	
3545 +	if (param == 0) {
3546 +		down_read(&current->mm->mmap_sem);
3547 +		vma_itr = current->mm->mmap;
3548 +		while (vma_itr != NULL) {
3549 +			printk(KERN_INFO "--------------------------------------------\n");
3550 +			printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
3551 +			printk(KERN_INFO "vm_end   : %lx\n", vma_itr->vm_end);
3552 +			printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
3553 +			printk(KERN_INFO "vm_prot  : %lx\n", pgprot_val(vma_itr->vm_page_prot));
3554 +			printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
3555 +	/*		if (vma_itr->vm_file) {
3556 +				struct file *fp = vma_itr->vm_file;
3557 +				unsigned long fcount = atomic_long_read(&(fp->f_count));
3558 +				printk(KERN_INFO "f_count : %ld\n", fcount);
3559 +				if (fcount > 1) {
3560 +					vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot);
3561 +				}
3562 +			}
3563 +			printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
3564 +	*/		
3565 +			vma_itr = vma_itr->vm_next;
3566 +		}
3567 +		printk(KERN_INFO "--------------------------------------------\n");
3568 +		up_read(&current->mm->mmap_sem);
3569 +		
3570 +		local_irq_save(flags);
3571 +		l2c310_flush_all();
3572 +		local_irq_restore(flags);
3573 +	}
3574 +	else if (param == 1) {
3575 +		int i;
3576 +		flush_cache(1);
3577 +		for (i = 0; i < 4; i++) {
3578 +			lock_cache(i, 0x00003fff);
3579 +		}
3580 +	}
3581 +	else if (param == 2) {
3582 +		int i;
3583 +		for (i = 0; i < 4; i++) {
3584 +			lock_cache(i, 0xffffffff);
3585 +		}
3586 +	}
3587 +	return ret;
3588 +}
3589 +
3590  /* p is a real-time task. Re-init its state as a best-effort task. */
3591  static void reinit_litmus_state(struct task_struct* p, int restore)
3592  {
3593 @@ -651,6 +870,12 @@ static int __init _init_litmus(void)
3594  	 *      mode change lock is used to enforce single mode change
3595  	 *      operation.
3596  	 */
3597 +#if defined(CONFIG_CPU_V7)
3598 +	unsigned int line_size_log = 5; // 2^5 = 32 byte
3599 +	unsigned int cache_info_sets = 2048; // 64KB (way_size) / 32B (line_size) = 2048
3600 +	printk("LITMUS^RT-ARM kernel\n");
3601 +#endif
3602 +
3603  	printk("Starting LITMUS^RT kernel\n");
3604  
3605  	register_sched_plugin(&linux_sched_plugin);
3606 @@ -665,11 +890,15 @@ static int __init _init_litmus(void)
3607  	else
3608  		printk("Could not register kill rt tasks magic sysrq.\n");
3609  #endif
3610 -
3611  	init_litmus_proc();
3612  
3613  	register_reboot_notifier(&shutdown_notifier);
3614  
3615 +#if defined(CONFIG_CPU_V7)
3616 +	color_mask = ((cache_info_sets << line_size_log) - 1) ^ (PAGE_SIZE - 1);
3617 +	printk("Page color mask %lx\n", color_mask);
3618 +#endif
3619 +
3620  	return 0;
3621  }
3622  
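
As a quick sanity check on the color_mask arithmetic in _init_litmus() above (an illustrative sketch, not part of the patch): with a 64KB way size and 32B lines (2048 sets) on 4KB pages, the mask selects the set-index bits above the page offset, i.e. bits 12-15, which yields 16 page colors.

    /* Stand-alone sketch mirroring the color_mask expression above for the
     * ARM configuration assumed by this patch; all values are hard-coded. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096;        /* 4KB pages */
        unsigned int line_size_log = 5;        /* 2^5 = 32B cache lines */
        unsigned int sets = 2048;              /* 64KB way / 32B lines */
        unsigned long mask = ((sets << line_size_log) - 1) ^ (page_size - 1);

        /* prints: color_mask = 0xf000, colors = 16 */
        printf("color_mask = %#lx, colors = %lu\n", mask, (mask >> 12) + 1);
        return 0;
    }
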
3623 diff --git litmus/mc2_common.c litmus/mc2_common.c
3624 new file mode 100644
3625 index 0000000..a8ea5d9
3626 --- /dev/null
3627 +++ litmus/mc2_common.c
3628 @@ -0,0 +1,78 @@
3629 +/*
3630 + * litmus/mc2_common.c
3631 + *
3632 + * Common functions for MC2 plugin.
3633 + */
3634 +
3635 +#include <linux/percpu.h>
3636 +#include <linux/sched.h>
3637 +#include <linux/list.h>
3638 +#include <linux/slab.h>
3639 +#include <asm/uaccess.h>
3640 +
3641 +#include <litmus/litmus.h>
3642 +#include <litmus/sched_plugin.h>
3643 +#include <litmus/sched_trace.h>
3644 +
3645 +#include <litmus/mc2_common.h>
3646 +
3647 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, struct reservation *res)
3648 +{
3649 +	task_client_init(tc, tsk, res);
3650 +	if ((mc2_param->crit < CRIT_LEVEL_A) ||
3651 +		(mc2_param->crit > CRIT_LEVEL_C))
3652 +		return -EINVAL;
3653 +	
3654 +	TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n", mc2_param->crit);
3655 +	
3656 +	return 0;
3657 +}
3658 +
3659 +asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
3660 +{
3661 +	struct task_struct *target;
3662 +	int retval = -EINVAL;
3663 +	struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
3664 +	
3665 +	if (!mp)
3666 +		return -ENOMEM;
3667 +
3668 +	printk("Setting up mc^2 task parameters for process %d.\n", pid);
3669 +
3670 +	if (pid < 0 || param == 0) {
3671 +		goto out;
3672 +	}
3673 +	if (copy_from_user(mp, param, sizeof(*mp))) {
3674 +		retval = -EFAULT;
3675 +		goto out;
3676 +	}
3677 +
3678 +	/* Task search and manipulation must be protected */
3679 +	read_lock_irq(&tasklist_lock);
3680 +	if (!(target = find_task_by_vpid(pid))) {
3681 +		retval = -ESRCH;
3682 +		goto out_unlock;
3683 +	}
3684 +
3685 +	if (is_realtime(target)) {
3686 +		/* The task is already a real-time task.
3687 +		 * We cannot allow parameter changes at this point.
3688 +		 */
3689 +		retval = -EBUSY;
3690 +		goto out_unlock;
3691 +	}
3692 +	if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
3693 +		printk(KERN_INFO "litmus: real-time task %d rejected "
3694 +			"because of invalid criticality level\n", pid);
3695 +		goto out_unlock;
3696 +	}
3697 +	
3698 +	//target->rt_param.plugin_state = mp;
3699 +	target->rt_param.mc2_data = mp;
3700 +
3701 +	retval = 0;
3702 +out_unlock:
3703 +	read_unlock_irq(&tasklist_lock);
3704 +out:
3705 +	return retval;
3706 +}
3707 \ No newline at end of file
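
For context on how sys_set_mc2_task_param() above is reached from user space, a hedged sketch (in practice the LITMUS^RT user-space tools wrap this call; the header and syscall-number macro names below are assumptions following the usual LITMUS^RT conventions): the caller fills in a struct mc2_task and issues the syscall before the target task is admitted as a real-time task, since the kernel returns -EBUSY afterwards.

    /* Hypothetical user-space sketch. Assumes a user-space copy of
     * litmus/mc2_common.h providing struct mc2_task and CRIT_LEVEL_B,
     * and that the patched headers export __NR_set_mc2_task_param. */
    #include <unistd.h>
    #include <sys/syscall.h>

    static int set_level_b(pid_t pid)
    {
        struct mc2_task param = { 0 };

        param.crit = CRIT_LEVEL_B; /* must lie in [CRIT_LEVEL_A, CRIT_LEVEL_C] */

        /* must run before the task becomes real-time (else -EBUSY) */
        return syscall(__NR_set_mc2_task_param, pid, &param);
    }
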
3708 diff --git litmus/polling_reservations.c litmus/polling_reservations.c
3709 new file mode 100644
3710 index 0000000..4a2fee57
3711 --- /dev/null
3712 +++ litmus/polling_reservations.c
3713 @@ -0,0 +1,564 @@
3714 +#include <linux/sched.h>
3715 +
3716 +#include <litmus/litmus.h>
3717 +#include <litmus/reservation.h>
3718 +#include <litmus/polling_reservations.h>
3719 +
3720 +
3721 +static void periodic_polling_client_arrives(
3722 +	struct reservation* res,
3723 +	struct reservation_client *client
3724 +)
3725 +{
3726 +	struct polling_reservation *pres =
3727 +		container_of(res, struct polling_reservation, res);
3728 +	lt_t instances, tmp;
3729 +
3730 +	list_add_tail(&client->list, &res->clients);
3731 +
3732 +	switch (res->state) {
3733 +		case RESERVATION_INACTIVE:
3734 +			/* Figure out next replenishment time. */
3735 +			if (res->env->time_zero == 0) {
3736 +				tmp = res->env->current_time - res->env->time_zero;
3737 +				instances =  div64_u64(tmp, pres->period);
3738 +				res->next_replenishment =
3739 +					(instances + 1) * pres->period + pres->offset;
3740 +			}
3741 +			else {
3742 +				tmp = res->env->current_time - res->env->time_zero;
3743 +				instances =  div64_u64(tmp, pres->period);
3744 +				res->next_replenishment = res->env->time_zero + instances * pres->period;
3745 +			}
3746 +				
3747 +			TRACE("ENV_TIME_ZERO %llu\n", res->env->time_zero);
3748 +			TRACE("pol-res: R%d activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
3749 +				res->id, tmp, instances, pres->period, res->next_replenishment,
3750 +				res->env->current_time);
3751 +
3752 +			res->env->change_state(res->env, res,
3753 +				RESERVATION_DEPLETED);
3754 +			break;
3755 +
3756 +		case RESERVATION_ACTIVE:
3757 +		case RESERVATION_DEPLETED:
3758 +			/* do nothing */
3759 +			break;
3760 +
3761 +		case RESERVATION_ACTIVE_IDLE:
3762 +			res->blocked_by_ghost = 0;
3763 +			res->env->change_state(res->env, res,
3764 +				RESERVATION_ACTIVE);
3765 +			break;
3766 +	}
3767 +}
3768 +
3769 +
3770 +static void periodic_polling_client_departs(
3771 +	struct reservation *res,
3772 +	struct reservation_client *client,
3773 +	int did_signal_job_completion
3774 +)
3775 +{
3776 +	list_del(&client->list);
3777 +
3778 +	switch (res->state) {
3779 +		case RESERVATION_INACTIVE:
3780 +		case RESERVATION_ACTIVE_IDLE:
3781 +			BUG(); /* INACTIVE or IDLE <=> no client */
3782 +			break;
3783 +
3784 +		case RESERVATION_ACTIVE:
3785 +			if (list_empty(&res->clients)) {
3786 +				res->env->change_state(res->env, res,
3787 +//						RESERVATION_ACTIVE_IDLE);
3788 +					res->cur_budget ?
3789 +						RESERVATION_ACTIVE_IDLE :
3790 +						RESERVATION_DEPLETED);
3791 +//					did_signal_job_completion ?
3792 +//						RESERVATION_DEPLETED :
3793 +//						RESERVATION_ACTIVE_IDLE);
3794 +			} /* else: nothing to do, more clients ready */
3795 +			break;
3796 +
3797 +		case RESERVATION_DEPLETED:
3798 +			/* do nothing */
3799 +			break;
3800 +	}
3801 +}
3802 +
3803 +static void periodic_polling_on_replenishment(
3804 +	struct reservation *res
3805 +)
3806 +{
3807 +	struct polling_reservation *pres =
3808 +		container_of(res, struct polling_reservation, res);
3809 +
3810 +	/* replenish budget */
3811 +	res->cur_budget = pres->max_budget;
3812 +	res->next_replenishment += pres->period;
3813 +	res->budget_consumed = 0;
3814 +
3815 +	TRACE("polling_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
3816 +	switch (res->state) {
3817 +		case RESERVATION_DEPLETED:
3818 +		case RESERVATION_INACTIVE:
3819 +		case RESERVATION_ACTIVE_IDLE:
3820 +			if (list_empty(&res->clients))
3821 +				/* no clients => poll again later */
3822 +				res->env->change_state(res->env, res,
3823 +					RESERVATION_INACTIVE);
3824 +			else
3825 +				/* we have clients & budget => ACTIVE */
3826 +				res->env->change_state(res->env, res,
3827 +					RESERVATION_ACTIVE);
3828 +			break;
3829 +
3830 +		case RESERVATION_ACTIVE:
3831 +			/* Replenished while active => tardy? In any case,
3832 +			 * go ahead and stay active. */
3833 +			break;
3834 +	}
3835 +}
3836 +
3837 +static void periodic_polling_on_replenishment_edf(
3838 +	struct reservation *res
3839 +)
3840 +{
3841 +	struct polling_reservation *pres =
3842 +		container_of(res, struct polling_reservation, res);
3843 +
3844 +	/* update current priority */
3845 +	res->priority = res->next_replenishment + pres->deadline;
3846 +
3847 +	/* do common updates */
3848 +	periodic_polling_on_replenishment(res);
3849 +}
3850 +
3851 +static void common_drain_budget(
3852 +		struct reservation *res,
3853 +		lt_t how_much)
3854 +{
3855 +	if (how_much >= res->cur_budget)
3856 +		res->cur_budget = 0;
3857 +	else
3858 +		res->cur_budget -= how_much;
3859 +
3860 +	res->budget_consumed += how_much;
3861 +	res->budget_consumed_total += how_much;
3862 +
3863 +	switch (res->state) {
3864 +		case RESERVATION_DEPLETED:
3865 +		case RESERVATION_INACTIVE:
3866 +			//BUG();
3867 +			TRACE("!!!!!!!!!!!!!!!STATE ERROR R%d STATE(%d)\n", res->id, res->state);
3868 +			break;
3869 +
3870 +		case RESERVATION_ACTIVE_IDLE:
3871 +		case RESERVATION_ACTIVE:
3872 +			if (!res->cur_budget) {
3873 +				res->env->change_state(res->env, res,
3874 +					RESERVATION_DEPLETED);
3875 +			} /* else: stay in current state */
3876 +			break;
3877 +	}
3878 +}
3879 +
3880 +static struct reservation_ops periodic_polling_ops_fp = {
3881 +	.dispatch_client = default_dispatch_client,
3882 +	.client_arrives = periodic_polling_client_arrives,
3883 +	.client_departs = periodic_polling_client_departs,
3884 +	.replenish = periodic_polling_on_replenishment,
3885 +	.drain_budget = common_drain_budget,
3886 +};
3887 +
3888 +static struct reservation_ops periodic_polling_ops_edf = {
3889 +	.dispatch_client = default_dispatch_client,
3890 +	.client_arrives = periodic_polling_client_arrives,
3891 +	.client_departs = periodic_polling_client_departs,
3892 +	.replenish = periodic_polling_on_replenishment_edf,
3893 +	.drain_budget = common_drain_budget,
3894 +};
3895 +
3896 +
3897 +
3898 +
3899 +static void sporadic_polling_client_arrives_fp(
3900 +	struct reservation* res,
3901 +	struct reservation_client *client
3902 +)
3903 +{
3904 +	struct polling_reservation *pres =
3905 +		container_of(res, struct polling_reservation, res);
3906 +
3907 +	list_add_tail(&client->list, &res->clients);
3908 +
3909 +	switch (res->state) {
3910 +		case RESERVATION_INACTIVE:
3911 +			/* Replenish now. */
3912 +			res->cur_budget = pres->max_budget;
3913 +			res->next_replenishment =
3914 +				res->env->current_time + pres->period;
3915 +
3916 +			res->env->change_state(res->env, res,
3917 +				RESERVATION_ACTIVE);
3918 +			break;
3919 +
3920 +		case RESERVATION_ACTIVE:
3921 +		case RESERVATION_DEPLETED:
3922 +			/* do nothing */
3923 +			break;
3924 +
3925 +		case RESERVATION_ACTIVE_IDLE:
3926 +			res->env->change_state(res->env, res,
3927 +				RESERVATION_ACTIVE);
3928 +			break;
3929 +	}
3930 +}
3931 +
3932 +static void sporadic_polling_client_arrives_edf(
3933 +	struct reservation* res,
3934 +	struct reservation_client *client
3935 +)
3936 +{
3937 +	struct polling_reservation *pres =
3938 +		container_of(res, struct polling_reservation, res);
3939 +
3940 +	list_add_tail(&client->list, &res->clients);
3941 +
3942 +	switch (res->state) {
3943 +		case RESERVATION_INACTIVE:
3944 +			/* Replenish now. */
3945 +			res->cur_budget = pres->max_budget;
3946 +			res->next_replenishment =
3947 +				res->env->current_time + pres->period;
3948 +			res->priority =
3949 +				res->env->current_time + pres->deadline;
3950 +
3951 +			res->env->change_state(res->env, res,
3952 +				RESERVATION_ACTIVE);
3953 +			break;
3954 +
3955 +		case RESERVATION_ACTIVE:
3956 +		case RESERVATION_DEPLETED:
3957 +			/* do nothing */
3958 +			break;
3959 +
3960 +		case RESERVATION_ACTIVE_IDLE:
3961 +			res->env->change_state(res->env, res,
3962 +				RESERVATION_ACTIVE);
3963 +			break;
3964 +	}
3965 +}
3966 +
3967 +static struct reservation_ops sporadic_polling_ops_fp = {
3968 +	.dispatch_client = default_dispatch_client,
3969 +	.client_arrives = sporadic_polling_client_arrives_fp,
3970 +	.client_departs = periodic_polling_client_departs,
3971 +	.replenish = periodic_polling_on_replenishment,
3972 +	.drain_budget = common_drain_budget,
3973 +};
3974 +
3975 +static struct reservation_ops sporadic_polling_ops_edf = {
3976 +	.dispatch_client = default_dispatch_client,
3977 +	.client_arrives = sporadic_polling_client_arrives_edf,
3978 +	.client_departs = periodic_polling_client_departs,
3979 +	.replenish = periodic_polling_on_replenishment_edf,
3980 +	.drain_budget = common_drain_budget,
3981 +};
3982 +
3983 +void polling_reservation_init(
3984 +	struct polling_reservation *pres,
3985 +	int use_edf_prio,
3986 +	int use_periodic_polling,
3987 +	lt_t budget, lt_t period, lt_t deadline, lt_t offset
3988 +)
3989 +{
3990 +	if (!deadline)
3991 +		deadline = period;
3992 +	BUG_ON(budget > period);
3993 +	BUG_ON(budget > deadline);
3994 +	BUG_ON(offset >= period);
3995 +
3996 +	reservation_init(&pres->res);
3997 +	pres->max_budget = budget;
3998 +	pres->period = period;
3999 +	pres->deadline = deadline;
4000 +	pres->offset = offset;
4001 +	TRACE_TASK(current, "polling_reservation_init: periodic %d, use_edf %d\n", use_periodic_polling, use_edf_prio);
4002 +	if (use_periodic_polling) {
4003 +		if (use_edf_prio)
4004 +			pres->res.ops = &periodic_polling_ops_edf;
4005 +		else
4006 +			pres->res.ops = &periodic_polling_ops_fp;
4007 +	} else {
4008 +		if (use_edf_prio)
4009 +			pres->res.ops = &sporadic_polling_ops_edf;
4010 +		else
4011 +			pres->res.ops = &sporadic_polling_ops_fp;
4012 +	}
4013 +}
4014 +
4015 +
4016 +static lt_t td_cur_major_cycle_start(struct table_driven_reservation *tdres)
4017 +{
4018 +	lt_t x, tmp;
4019 +
4020 +	tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
4021 +	x = div64_u64(tmp, tdres->major_cycle);
4022 +	x *= tdres->major_cycle;
4023 +	return x;
4024 +}
4025 +
4026 +
4027 +static lt_t td_next_major_cycle_start(struct table_driven_reservation *tdres)
4028 +{
4029 +	lt_t x, tmp;
4030 +
4031 +	tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
4032 +	x = div64_u64(tmp, tdres->major_cycle) + 1;
4033 +	x *= tdres->major_cycle;
4034 +	return x;
4035 +}
4036 +
4037 +static void td_client_arrives(
4038 +	struct reservation* res,
4039 +	struct reservation_client *client
4040 +)
4041 +{
4042 +	struct table_driven_reservation *tdres =
4043 +		container_of(res, struct table_driven_reservation, res);
4044 +
4045 +	list_add_tail(&client->list, &res->clients);
4046 +
4047 +	switch (res->state) {
4048 +		case RESERVATION_INACTIVE:
4049 +			/* Figure out first replenishment time. */
4050 +			tdres->major_cycle_start = td_next_major_cycle_start(tdres);
4051 +			res->next_replenishment  = tdres->major_cycle_start;
4052 +			res->next_replenishment += tdres->intervals[0].start;
4053 +			tdres->next_interval = 0;
4054 +
4055 +			res->env->change_state(res->env, res,
4056 +				RESERVATION_DEPLETED);
4057 +			break;
4058 +
4059 +		case RESERVATION_ACTIVE:
4060 +		case RESERVATION_DEPLETED:
4061 +			/* do nothing */
4062 +			break;
4063 +
4064 +		case RESERVATION_ACTIVE_IDLE:
4065 +			res->env->change_state(res->env, res,
4066 +				RESERVATION_ACTIVE);
4067 +			break;
4068 +	}
4069 +}
4070 +
4071 +static void td_client_departs(
4072 +	struct reservation *res,
4073 +	struct reservation_client *client,
4074 +	int did_signal_job_completion
4075 +)
4076 +{
4077 +	list_del(&client->list);
4078 +
4079 +	switch (res->state) {
4080 +		case RESERVATION_INACTIVE:
4081 +		case RESERVATION_ACTIVE_IDLE:
4082 +			//BUG(); /* INACTIVE or IDLE <=> no client */
4083 +			break;
4084 +
4085 +		case RESERVATION_ACTIVE:
4086 +			if (list_empty(&res->clients)) {
4087 +				res->env->change_state(res->env, res,
4088 +						RESERVATION_ACTIVE_IDLE);
4089 +			} /* else: nothing to do, more clients ready */
4090 +			break;
4091 +
4092 +		case RESERVATION_DEPLETED:
4093 +			/* do nothing */
4094 +			break;
4095 +	}
4096 +}
4097 +
4098 +static lt_t td_time_remaining_until_end(struct table_driven_reservation *tdres)
4099 +{
4100 +	lt_t now = tdres->res.env->current_time;
4101 +	lt_t end = tdres->cur_interval.end;
4102 +	//TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n", tdres->res.id,	tdres->cur_interval.start, now, end, tdres->res.state);
4103 +	if (now >=  end)
4104 +		return 0;
4105 +	else
4106 +		return end - now;
4107 +}
4108 +
4109 +static void td_replenish(
4110 +	struct reservation *res)
4111 +{
4112 +	struct table_driven_reservation *tdres =
4113 +		container_of(res, struct table_driven_reservation, res);
4114 +
4115 +	//TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id, res->next_replenishment);
4116 +
4117 +	/* figure out current interval */
4118 +	tdres->cur_interval.start = tdres->major_cycle_start +
4119 +		tdres->intervals[tdres->next_interval].start;
4120 +	tdres->cur_interval.end =  tdres->major_cycle_start +
4121 +		tdres->intervals[tdres->next_interval].end;
4122 +/*	TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
4123 +		tdres->major_cycle_start,
4124 +		tdres->cur_interval.start,
4125 +		tdres->cur_interval.end);
4126 +*/
4127 +	/* reset budget */
4128 +	res->cur_budget = td_time_remaining_until_end(tdres);
4129 +	res->budget_consumed = 0;
4130 +	//TRACE("td_replenish(%u): %s budget=%llu\n", res->id, res->cur_budget ? "" : "WARNING", res->cur_budget);
4131 +
4132 +	/* prepare next slot */
4133 +	tdres->next_interval = (tdres->next_interval + 1) % tdres->num_intervals;
4134 +	if (!tdres->next_interval)
4135 +		/* wrap to next major cycle */
4136 +		tdres->major_cycle_start += tdres->major_cycle;
4137 +
4138 +	/* determine next time this reservation becomes eligible to execute */
4139 +	res->next_replenishment  = tdres->major_cycle_start;
4140 +	res->next_replenishment += tdres->intervals[tdres->next_interval].start;
4141 +	//TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
4142 +
4143 +
4144 +	switch (res->state) {
4145 +		case RESERVATION_DEPLETED:
4146 +		case RESERVATION_ACTIVE:
4147 +		case RESERVATION_ACTIVE_IDLE:
4148 +			if (list_empty(&res->clients))
4149 +				res->env->change_state(res->env, res,
4150 +					RESERVATION_ACTIVE_IDLE);
4151 +			else
4152 +				/* we have clients & budget => ACTIVE */
4153 +				res->env->change_state(res->env, res,
4154 +					RESERVATION_ACTIVE);
4155 +			break;
4156 +
4157 +		case RESERVATION_INACTIVE:
4158 +			BUG();
4159 +			break;
4160 +	}
4161 +}
4162 +
4163 +static void td_drain_budget(
4164 +		struct reservation *res,
4165 +		lt_t how_much)
4166 +{
4167 +	struct table_driven_reservation *tdres =
4168 +		container_of(res, struct table_driven_reservation, res);
4169 +
4170 +	res->budget_consumed += how_much;
4171 +	res->budget_consumed_total += how_much;
4172 +
4173 +	/* Table-driven scheduling: instead of tracking the budget, we compute
4174 +	 * how much time is left in this allocation interval. */
4175 +
4176 +	/* sanity check: we should never try to drain from future slots */
4177 +	//TRACE("TD_DRAIN STATE(%d) [%llu,%llu]  %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
4178 +	//BUG_ON(tdres->cur_interval.start > res->env->current_time);
4179 +	if (tdres->cur_interval.start > res->env->current_time)
4180 +		TRACE("TD_DRAIN BUG!!!!!!!!!!\n");
4181 +
4182 +	switch (res->state) {
4183 +		case RESERVATION_DEPLETED:
4184 +		case RESERVATION_INACTIVE:
4185 +			//BUG();
4186 +			TRACE("TD_DRAIN!!!!!!!!! RES_STATE = %d\n", res->state);
4187 +			break;
4188 +
4189 +		case RESERVATION_ACTIVE_IDLE:
4190 +		case RESERVATION_ACTIVE:
4191 +			res->cur_budget = td_time_remaining_until_end(tdres);
4192 +			//TRACE("td_drain_budget(%u): drained to budget=%llu\n", res->id, res->cur_budget);
4193 +			if (!res->cur_budget) {
4194 +				res->env->change_state(res->env, res,
4195 +					RESERVATION_DEPLETED);
4196 +			} else {
4197 +				/* sanity check budget calculation */
4198 +				//BUG_ON(res->env->current_time >= tdres->cur_interval.end);
4199 +				//BUG_ON(res->env->current_time < tdres->cur_interval.start);
4200 +				if (res->env->current_time >= tdres->cur_interval.end)
4201 +					printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING1\n");
4202 +				if (res->env->current_time < tdres->cur_interval.start)
4203 +					printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING2\n");
4204 +			}
4205 +
4206 +			break;
4207 +	}
4208 +}
4209 +
4210 +static struct task_struct* td_dispatch_client(
4211 +	struct reservation *res,
4212 +	lt_t *for_at_most)
4213 +{
4214 +	struct task_struct *t;
4215 +	struct table_driven_reservation *tdres =
4216 +		container_of(res, struct table_driven_reservation, res);
4217 +
4218 +	/* usual logic for selecting a client */
4219 +	t = default_dispatch_client(res, for_at_most);
4220 +
4221 +	TRACE_TASK(t, "td_dispatch_client(%u): selected, budget=%llu\n",
4222 +		res->id, res->cur_budget);
4223 +
4224 +	/* check how much budget we have left in this time slot */
4225 +	res->cur_budget = td_time_remaining_until_end(tdres);
4226 +
4227 +	TRACE_TASK(t, "td_dispatch_client(%u): updated to budget=%llu next=%d\n",
4228 +		res->id, res->cur_budget, tdres->next_interval);
4229 +
4230 +	if (unlikely(!res->cur_budget)) {
4231 +		/* Unlikely case: if we ran out of budget, the user configured
4232 +		 * a broken scheduling table (overlapping table slots).
4233 +		 * Not much we can do about this, but we can't dispatch a job
4234 +		 * now without causing overload. So let's register this reservation
4235 +		 * as depleted and wait for the next allocation. */
4236 +		TRACE("td_dispatch_client(%u): budget unexpectedly depleted "
4237 +			"(check scheduling table for unintended overlap)\n",
4238 +			res->id);
4239 +		res->env->change_state(res->env, res,
4240 +			RESERVATION_DEPLETED);
4241 +		return NULL;
4242 +	} else
4243 +		return t;
4244 +}
4245 +
4246 +static struct reservation_ops td_ops = {
4247 +	.dispatch_client = td_dispatch_client,
4248 +	.client_arrives = td_client_arrives,
4249 +	.client_departs = td_client_departs,
4250 +	.replenish = td_replenish,
4251 +	.drain_budget = td_drain_budget,
4252 +};
4253 +
4254 +void table_driven_reservation_init(
4255 +	struct table_driven_reservation *tdres,
4256 +	lt_t major_cycle,
4257 +	struct lt_interval *intervals,
4258 +	unsigned int num_intervals)
4259 +{
4260 +	unsigned int i;
4261 +
4262 +	/* sanity checking */
4263 +	BUG_ON(!num_intervals);
4264 +	for (i = 0; i < num_intervals; i++)
4265 +		BUG_ON(intervals[i].end <= intervals[i].start);
4266 +	for (i = 0; i + 1 < num_intervals; i++)
4267 +		BUG_ON(intervals[i + 1].start <= intervals[i].end);
4268 +	BUG_ON(intervals[num_intervals - 1].end > major_cycle);
4269 +
4270 +	reservation_init(&tdres->res);
4271 +	tdres->major_cycle = major_cycle;
4272 +	tdres->intervals = intervals;
4273 +	tdres->cur_interval.start = 0;
4274 +	tdres->cur_interval.end   = 0;
4275 +	tdres->num_intervals = num_intervals;
4276 +	tdres->res.ops = &td_ops;
4277 +}
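
To illustrate how polling_reservation_init() above is intended to be called (a sketch with example values; in this patch the actual call sites sit in the MC2 plugin's reservation-creation path and take their parameters from sys_reservation_create()): a periodic polling reservation with a 2ms budget, 10ms period, implicit deadline, and EDF priority would be set up roughly as follows. lt_t values are in nanoseconds.

    /* Sketch only -- example values, not the plugin's actual call site. */
    #include <litmus/polling_reservations.h>

    static void example_periodic_edf(struct polling_reservation *pres)
    {
        lt_t budget = 2000000ULL;   /* 2 ms  */
        lt_t period = 10000000ULL;  /* 10 ms */

        /* use_edf_prio = 1, use_periodic_polling = 1;
         * deadline = 0 means "implicit deadline = period" (see above),
         * offset = 0 releases at the start of each period. */
        polling_reservation_init(pres, 1, 1, budget, period, 0, 0);
        pres->res.id = 42;          /* reservation ID chosen by the caller */
    }
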
4278 diff --git litmus/reservation.c litmus/reservation.c
4279 new file mode 100644
4280 index 0000000..07e38cb
4281 --- /dev/null
4282 +++ litmus/reservation.c
4283 @@ -0,0 +1,709 @@
4284 +#include <linux/sched.h>
4285 +#include <linux/slab.h>
4286 +
4287 +#include <litmus/litmus.h>
4288 +#include <litmus/reservation.h>
4289 +
4290 +//#define TRACE(fmt, args...) do {} while (false)
4291 +//#define TRACE_TASK(fmt, args...) do {} while (false)
4292 +
4293 +#define BUDGET_ENFORCEMENT_AT_C 0
4294 +	
4295 +void reservation_init(struct reservation *res)
4296 +{
4297 +	memset(res, 0, sizeof(*res));
4298 +	res->state = RESERVATION_INACTIVE;
4299 +	INIT_LIST_HEAD(&res->clients);
4300 +}
4301 +
4302 +struct task_struct* default_dispatch_client(
4303 +	struct reservation *res,
4304 +	lt_t *for_at_most)
4305 +{
4306 +	struct reservation_client *client, *next;
4307 +	struct task_struct* tsk;
4308 +
4309 +	BUG_ON(res->state != RESERVATION_ACTIVE);
4310 +	*for_at_most = 0;
4311 +
4312 +	list_for_each_entry_safe(client, next, &res->clients, list) {
4313 +		tsk = client->dispatch(client);
4314 +		if (likely(tsk)) {
4315 +			return tsk;
4316 +		}
4317 +	}
4318 +	return NULL;
4319 +}
4320 +
4321 +static struct task_struct * task_client_dispatch(struct reservation_client *client)
4322 +{
4323 +	struct task_client *tc = container_of(client, struct task_client, client);
4324 +	return tc->task;
4325 +}
4326 +
4327 +void task_client_init(struct task_client *tc, struct task_struct *tsk,
4328 +	struct reservation *res)
4329 +{
4330 +	memset(&tc->client, 0, sizeof(tc->client));
4331 +	tc->client.dispatch = task_client_dispatch;
4332 +	tc->client.reservation = res;
4333 +	tc->task = tsk;
4334 +}
4335 +
4336 +static void sup_scheduler_update_at(
4337 +	struct sup_reservation_environment* sup_env,
4338 +	lt_t when)
4339 +{
4340 +	//TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
4341 +	if (sup_env->next_scheduler_update > when)
4342 +		sup_env->next_scheduler_update = when;
4343 +}
4344 +
4345 +void sup_scheduler_update_after(
4346 +	struct sup_reservation_environment* sup_env,
4347 +	lt_t timeout)
4348 +{
4349 +	sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
4350 +}
4351 +
4352 +static int _sup_queue_depleted(
4353 +	struct sup_reservation_environment* sup_env,
4354 +	struct reservation *res)
4355 +{
4356 +	struct list_head *pos;
4357 +	struct reservation *queued;
4358 +	int passed_earlier = 0;
4359 +
4360 +	list_for_each(pos, &sup_env->depleted_reservations) {
4361 +		queued = list_entry(pos, struct reservation, list);
4362 +		if (queued->next_replenishment > res->next_replenishment) {
4363 +			list_add(&res->list, pos->prev);
4364 +			return passed_earlier;
4365 +		} else
4366 +			passed_earlier = 1;
4367 +	}
4368 +
4369 +	list_add_tail(&res->list, &sup_env->depleted_reservations);
4370 +
4371 +	return passed_earlier;
4372 +}
4373 +
4374 +static void sup_queue_depleted(
4375 +	struct sup_reservation_environment* sup_env,
4376 +	struct reservation *res)
4377 +{
4378 +	int passed_earlier = _sup_queue_depleted(sup_env, res);
4379 +
4380 +	/* check for updated replenishment time */
4381 +	if (!passed_earlier)
4382 +		sup_scheduler_update_at(sup_env, res->next_replenishment);
4383 +}
4384 +
4385 +static int _sup_queue_active(
4386 +	struct sup_reservation_environment* sup_env,
4387 +	struct reservation *res)
4388 +{
4389 +	struct list_head *pos;
4390 +	struct reservation *queued;
4391 +	int passed_active = 0;
4392 +
4393 +	list_for_each(pos, &sup_env->active_reservations) {
4394 +		queued = list_entry(pos, struct reservation, list);
4395 +		if (queued->priority > res->priority) {
4396 +			list_add(&res->list, pos->prev);
4397 +			return passed_active;
4398 +		} else if (queued->state == RESERVATION_ACTIVE)
4399 +			passed_active = 1;
4400 +	}
4401 +
4402 +	list_add_tail(&res->list, &sup_env->active_reservations);
4403 +	return passed_active;
4404 +}
4405 +
4406 +static void sup_queue_active(
4407 +	struct sup_reservation_environment* sup_env,
4408 +	struct reservation *res)
4409 +{
4410 +	int passed_active = _sup_queue_active(sup_env, res);
4411 +
4412 +	/* check for possible preemption */
4413 +	if (res->state == RESERVATION_ACTIVE && !passed_active)
4414 +		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
4415 +	else {
4416 +		/* Active means this reservation is draining budget => make sure
4417 +		 * the scheduler is called to notice when the reservation budget has been
4418 +		 * drained completely. */
4419 +		sup_scheduler_update_after(sup_env, res->cur_budget);
4420 +	}
4421 +}
4422 +
4423 +static void sup_queue_reservation(
4424 +	struct sup_reservation_environment* sup_env,
4425 +	struct reservation *res)
4426 +{
4427 +	switch (res->state) {
4428 +		case RESERVATION_INACTIVE:
4429 +			list_add(&res->list, &sup_env->inactive_reservations);
4430 +			break;
4431 +
4432 +		case RESERVATION_DEPLETED:
4433 +			sup_queue_depleted(sup_env, res);
4434 +			break;
4435 +
4436 +		case RESERVATION_ACTIVE_IDLE:
4437 +		case RESERVATION_ACTIVE:
4438 +			sup_queue_active(sup_env, res);
4439 +			break;
4440 +	}
4441 +}
4442 +
4443 +void sup_add_new_reservation(
4444 +	struct sup_reservation_environment* sup_env,
4445 +	struct reservation* new_res)
4446 +{
4447 +	new_res->env = &sup_env->env;
4448 +	sup_queue_reservation(sup_env, new_res);
4449 +}
4450 +
4451 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
4452 +	unsigned int id)
4453 +{
4454 +	struct reservation *res;
4455 +
4456 +	list_for_each_entry(res, &sup_env->active_reservations, list) {
4457 +		if (res->id == id)
4458 +			return res;
4459 +	}
4460 +	list_for_each_entry(res, &sup_env->inactive_reservations, list) {
4461 +		if (res->id == id)
4462 +			return res;
4463 +	}
4464 +	list_for_each_entry(res, &sup_env->depleted_reservations, list) {
4465 +		if (res->id == id)
4466 +			return res;
4467 +	}
4468 +
4469 +	return NULL;
4470 +}
4471 +
4472 +static void sup_charge_budget(
4473 +	struct sup_reservation_environment* sup_env,
4474 +	lt_t delta)
4475 +{
4476 +	struct list_head *pos, *next;
4477 +	struct reservation *res;
4478 +
4479 +	int encountered_active = 0;
4480 +
4481 +	list_for_each_safe(pos, next, &sup_env->active_reservations) {
4482 +		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
4483 +		res = list_entry(pos, struct reservation, list);
4484 +		if (res->state == RESERVATION_ACTIVE) {
4485 +			TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
4486 +			if (encountered_active == 0 && res->blocked_by_ghost == 0) {
4487 +				TRACE("DRAIN !!\n");
4488 +				res->ops->drain_budget(res, delta);
4489 +				encountered_active = 1;
4490 +			}			
4491 +		} else {
4492 +			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
4493 +			TRACE("sup_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
4494 +			res->ops->drain_budget(res, delta);
4495 +		}
4496 +		if (res->state == RESERVATION_ACTIVE ||
4497 +			res->state == RESERVATION_ACTIVE_IDLE)
4498 +		{
4499 +			/* make sure the scheduler is invoked when this reservation
4500 +			 * exhausts its remaining budget */
4501 +			 TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n",
4502 +				res->id, res->cur_budget);
4503 +			 sup_scheduler_update_after(sup_env, res->cur_budget);
4504 +		}
4505 +		//if (encountered_active == 2)
4506 +			/* stop at the first ACTIVE reservation */
4507 +		//	break;
4508 +	}
4509 +	//TRACE("finished charging budgets\n");
4510 +}
4511 +
4512 +static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
4513 +{
4514 +	struct list_head *pos, *next;
4515 +	struct reservation *res;
4516 +
4517 +	list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
4518 +		res = list_entry(pos, struct reservation, list);
4519 +		if (res->next_replenishment <= sup_env->env.current_time) {
4520 +			res->ops->replenish(res);
4521 +		} else {
4522 +			/* list is ordered by increasing replenishment times */
4523 +			break;
4524 +		}
4525 +	}
4526 +	//TRACE("finished replenishing budgets\n");
4527 +
4528 +	/* request a scheduler update at the next replenishment instant */
4529 +	res = list_first_entry_or_null(&sup_env->depleted_reservations,
4530 +		struct reservation, list);
4531 +	if (res)
4532 +		sup_scheduler_update_at(sup_env, res->next_replenishment);
4533 +}
4534 +
4535 +void sup_update_time(
4536 +	struct sup_reservation_environment* sup_env,
4537 +	lt_t now)
4538 +{
4539 +	lt_t delta;
4540 +
4541 +	/* If the time didn't advance, there is nothing to do.
4542 +	 * This check makes it safe to call sup_update_time() potentially
4543 +	 * multiple times (e.g., via different code paths). */
4544 +	//TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
4545 +	if (unlikely(now <= sup_env->env.current_time))
4546 +		return;
4547 +
4548 +	delta = now - sup_env->env.current_time;
4549 +	sup_env->env.current_time = now;
4550 +
4551 +	/* check if future updates are required */
4552 +	if (sup_env->next_scheduler_update <= sup_env->env.current_time)
4553 +		sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
4554 +
4555 +	/* deplete budgets by passage of time */
4556 +	//TRACE("CHARGE###\n");
4557 +	sup_charge_budget(sup_env, delta);
4558 +
4559 +	/* check if any budgets where replenished */
4560 +	//TRACE("REPLENISH###\n");
4561 +	sup_replenish_budgets(sup_env);
4562 +}
4563 +
4564 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
4565 +{
4566 +	struct reservation *res, *next;
4567 +	struct task_struct *tsk = NULL;
4568 +	lt_t time_slice;
4569 +
4570 +	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
4571 +		if (res->state == RESERVATION_ACTIVE) {
4572 +			tsk = res->ops->dispatch_client(res, &time_slice);
4573 +			if (likely(tsk)) {
4574 +				if (time_slice)
4575 +				    sup_scheduler_update_after(sup_env, time_slice);
4576 +				sup_scheduler_update_after(sup_env, res->cur_budget);
4577 +				return tsk;
4578 +			}
4579 +		}
4580 +	}
4581 +
4582 +	return NULL;
4583 +}
4584 +
4585 +static void sup_res_change_state(
4586 +	struct reservation_environment* env,
4587 +	struct reservation *res,
4588 +	reservation_state_t new_state)
4589 +{
4590 +	struct sup_reservation_environment* sup_env;
4591 +
4592 +	sup_env = container_of(env, struct sup_reservation_environment, env);
4593 +
4594 +	TRACE("reservation R%d state %d->%d at %llu\n",
4595 +		res->id, res->state, new_state, env->current_time);
4596 +
4597 +	list_del(&res->list);
4598 +	/* check if we need to reschedule because we lost an active reservation */
4599 +	if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
4600 +		sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
4601 +	res->state = new_state;
4602 +	sup_queue_reservation(sup_env, res);
4603 +}
4604 +
4605 +void sup_init(struct sup_reservation_environment* sup_env)
4606 +{
4607 +	memset(sup_env, 0, sizeof(*sup_env));
4608 +
4609 +	INIT_LIST_HEAD(&sup_env->active_reservations);
4610 +	INIT_LIST_HEAD(&sup_env->depleted_reservations);
4611 +	INIT_LIST_HEAD(&sup_env->inactive_reservations);
4612 +
4613 +	sup_env->env.change_state = sup_res_change_state;
4614 +
4615 +	sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
4616 +}
4617 +
4618 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
4619 +	unsigned int id)
4620 +{
4621 +	struct reservation *res;
4622 +
4623 +	list_for_each_entry(res, &gmp_env->active_reservations, list) {
4624 +		if (res->id == id)
4625 +			return res;
4626 +	}
4627 +	list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
4628 +		if (res->id == id)
4629 +			return res;
4630 +	}
4631 +	list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
4632 +		if (res->id == id)
4633 +			return res;
4634 +	}
4635 +
4636 +	return NULL;
4637 +}
4638 +
4639 +
4640 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
4641 +	unsigned int id)
4642 +{
4643 +	struct next_timer_event *event;
4644 +
4645 +	list_for_each_entry(event, &gmp_env->next_events, list) {
4646 +		if (event->id == id)
4647 +			return event;
4648 +	}
4649 +
4650 +	return NULL;
4651 +}
4652 +
4653 +
4654 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
4655 +	lt_t when)
4656 +{
4657 +	struct next_timer_event *event;
4658 +
4659 +	list_for_each_entry(event, &gmp_env->next_events, list) {
4660 +		if (event->next_update == when)
4661 +			return event;
4662 +	}
4663 +
4664 +	return NULL;
4665 +}
4666 +
4667 +#define TIMER_RESOLUTION 100000L
4668 +
4669 +static void gmp_add_event(
4670 +	struct gmp_reservation_environment* gmp_env,
4671 +	lt_t when, unsigned int id, event_type_t type)
4672 +{
4673 +	struct next_timer_event *nevent, *queued;
4674 +	struct list_head *pos;
4675 +	int found = 0, update = 0;
4676 +
4677 +	//when = div64_u64(when, TIMER_RESOLUTION);
4678 +	//when *= TIMER_RESOLUTION;
4679 +//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
4680 +	nevent = gmp_find_event_by_id(gmp_env, id);
4681 +	
4682 +	if (nevent)
4683 +		TRACE("EVENT R%d update prev = %llu, new = %llu\n", nevent->id, nevent->next_update, when);
4684 +	
4685 +	if (nevent && nevent->next_update > when) {
4686 +		list_del(&nevent->list);
4687 +		update = 1;
4688 +		
4689 +	}
4690 +	
4691 +	if (!nevent || nevent->type != type || update == 1) {
4692 +		if (update == 0)
4693 +			nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
4694 +		BUG_ON(!nevent);
4695 +		nevent->next_update = when;
4696 +		nevent->id = id;
4697 +		nevent->type = type;
4698 +		nevent->timer_armed_on = NO_CPU;
4699 +
4700 +		list_for_each(pos, &gmp_env->next_events) {
4701 +			queued = list_entry(pos, struct next_timer_event, list);
4702 +			if (queued->next_update > nevent->next_update) {
4703 +				list_add(&nevent->list, pos->prev);
4704 +				found = 1;
4705 +				TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
4706 +				break;
4707 +			}
4708 +		}
4709 +		
4710 +		if (!found) {
4711 +			list_add_tail(&nevent->list, &gmp_env->next_events);
4712 +			TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
4713 +		}
4714 +	} else {
4715 +		//TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
4716 +		//printk(KERN_ALERT "EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
4717 +	}
4718 +	
4719 +	TRACE("======START PRINTING EVENT LIST======\n");
4720 +	gmp_print_events(gmp_env, litmus_clock());
4721 +	TRACE("======FINISH PRINTING EVENT LIST======\n");
4722 +}
4723 +
4724 +void gmp_add_event_after(
4725 +	struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
4726 +{
4727 +	//printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
4728 +	gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
4729 +}
4730 +
4731 +static void gmp_queue_depleted(
4732 +	struct gmp_reservation_environment* gmp_env,
4733 +	struct reservation *res)
4734 +{
4735 +	struct list_head *pos;
4736 +	struct reservation *queued;
4737 +	int found = 0;
4738 +
4739 +//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
4740 +	
4741 +	list_for_each(pos, &gmp_env->depleted_reservations) {
4742 +		queued = list_entry(pos, struct reservation, list);
4743 +		if (queued && (queued->next_replenishment > res->next_replenishment)) {
4744 +//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
4745 +			list_add(&res->list, pos->prev);
4746 +			found = 1;
4747 +			break;
4748 +		}
4749 +	}
4750 +
4751 +	if (!found)
4752 +		list_add_tail(&res->list, &gmp_env->depleted_reservations);
4753 +
4754 +	TRACE("R%d queued to depleted_list\n", res->id);
4755 +//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
4756 +	gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
4757 +}
4758 +
4759 +static void gmp_queue_active(
4760 +	struct gmp_reservation_environment* gmp_env,
4761 +	struct reservation *res)
4762 +{
4763 +	struct list_head *pos;
4764 +	struct reservation *queued;
4765 +	int check_preempt = 1, found = 0;
4766 +
4767 +	list_for_each(pos, &gmp_env->active_reservations) {
4768 +		queued = list_entry(pos, struct reservation, list);
4769 +		if (queued->priority > res->priority) {
4770 +			list_add(&res->list, pos->prev);
4771 +			found = 1;
4772 +			break;
4773 +		} else if (queued->scheduled_on == NO_CPU)
4774 +			check_preempt = 0;
4775 +	}
4776 +
4777 +	if (!found)
4778 +		list_add_tail(&res->list, &gmp_env->active_reservations);
4779 +
4780 +	/* check for possible preemption */
4781 +	if (res->state == RESERVATION_ACTIVE && check_preempt)
4782 +		gmp_env->schedule_now++;
4783 +
4784 +#if BUDGET_ENFORCEMENT_AT_C	
4785 +	gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
4786 +#endif
4787 +	res->event_added = 1;	
4788 +}
4789 +
4790 +static void gmp_queue_reservation(
4791 +	struct gmp_reservation_environment* gmp_env,
4792 +	struct reservation *res)
4793 +{
4794 +
4795 +//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
4796 +	switch (res->state) {
4797 +		case RESERVATION_INACTIVE:
4798 +			list_add(&res->list, &gmp_env->inactive_reservations);
4799 +			break;
4800 +
4801 +		case RESERVATION_DEPLETED:
4802 +			gmp_queue_depleted(gmp_env, res);
4803 +			break;
4804 +
4805 +		case RESERVATION_ACTIVE_IDLE:
4806 +		case RESERVATION_ACTIVE:
4807 +			gmp_queue_active(gmp_env, res);
4808 +			break;
4809 +	}
4810 +}
4811 +
4812 +void gmp_add_new_reservation(
4813 +	struct gmp_reservation_environment* gmp_env,
4814 +	struct reservation* new_res)
4815 +{
4816 +	new_res->env = &gmp_env->env;
4817 +	gmp_queue_reservation(gmp_env, new_res);
4818 +}
4819 +
4820 +#if BUDGET_ENFORCEMENT_AT_C
4821 +static void gmp_charge_budget(
4822 +	struct gmp_reservation_environment* gmp_env,
4823 +	lt_t delta)
4824 +{
4825 +	struct list_head *pos, *next;
4826 +	struct reservation *res;
4827 +
4828 +	list_for_each_safe(pos, next, &gmp_env->active_reservations) {
4829 +		int drained = 0;
4830 +		/* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
4831 +		res = list_entry(pos, struct reservation, list);
4832 +		if (res->state == RESERVATION_ACTIVE) {
4833 +			TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
4834 +			if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
4835 +				TRACE("DRAIN !!\n");
4836 +				drained = 1;
4837 +				res->ops->drain_budget(res, delta);
4838 +			} else {
4839 +				TRACE("NO DRAIN (not scheduled)!!\n");
4840 +			}
4841 +		} else {
4842 +			//BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
4843 +			if (res->state != RESERVATION_ACTIVE_IDLE)
4844 +				TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
4845 +			TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
4846 +			//if (res->is_ghost != NO_CPU) {
4847 +				TRACE("DRAIN !!\n");
4848 +				drained = 1;
4849 +				res->ops->drain_budget(res, delta);
4850 +			//}
4851 +		}
4852 +		if ((res->state == RESERVATION_ACTIVE ||
4853 +			res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
4854 +		{
4855 +			/* make sure the scheduler is invoked when this reservation
4856 +			 * exhausts its remaining budget */
4857 +			 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
4858 +			 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
4859 +			 res->event_added = 1;
4860 +		}
4861 +		//if (encountered_active == 2)
4862 +			/* stop at the first ACTIVE reservation */
4863 +		//	break;
4864 +	}
4865 +	//TRACE("finished charging budgets\n");
4866 +}
4867 +#else
4868 +
4869 +static void gmp_charge_budget(
4870 +	struct gmp_reservation_environment* gmp_env,
4871 +	lt_t delta)
4872 +{
4873 +	return;
4874 +}
4875 +
4876 +#endif
4877 +
4878 +static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
4879 +{
4880 +	struct list_head *pos, *next;
4881 +	struct reservation *res;
4882 +
4883 +	list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
4884 +		res = list_entry(pos, struct reservation, list);
4885 +		if (res->next_replenishment <= gmp_env->env.current_time) {
4886 +			res->ops->replenish(res);
4887 +			if (res->is_ghost != NO_CPU) {
4888 +				TRACE("R%d replenished! scheduled_on=%d\n", res->id, res->scheduled_on);
4889 +			}
4890 +		} else {
4891 +			/* list is ordered by increasing replenishment times */
4892 +			break;
4893 +		}
4894 +	}
4895 +	//TRACE("finished replenishing budgets\n");
4896 +}
4897 +
4898 +#define EPSILON	50
4899 +
4900 +/* return schedule_now */
4901 +int gmp_update_time(
4902 +	struct gmp_reservation_environment* gmp_env,
4903 +	lt_t now)
4904 +{
4905 +	struct next_timer_event *event, *next;
4906 +	lt_t delta, ret;
4907 +
4908 +	/* If the time didn't advance, there is nothing to do.
4909 +	 * This check makes it safe to call gmp_update_time() potentially
4910 +	 * multiple times (e.g., via different code paths). */
4911 +	//TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
4912 +	if (unlikely(now <= gmp_env->env.current_time + EPSILON))
4913 +		return 0;
4914 +
4915 +	delta = now - gmp_env->env.current_time;
4916 +	gmp_env->env.current_time = now;
4917 +
4918 +
4919 +	//gmp_print_events(gmp_env, now);
4920 +	/* deplete budgets by passage of time */
4921 +	//TRACE("CHARGE###\n");
4922 +	gmp_charge_budget(gmp_env, delta);
4923 +
4924 +	/* check if any budgets where replenished */
4925 +	//TRACE("REPLENISH###\n");
4926 +	gmp_replenish_budgets(gmp_env);
4927 +
4928 +	
4929 +	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
4930 +		if (event->next_update < now) {
4931 +			list_del(&event->list);
4932 +			//TRACE("EVENT at %llu IS DELETED\n", event->next_update);
4933 +			kfree(event);
4934 +		} else {
4935 +			break;
4936 +		}
4937 +	}		
4938 +	
4939 +	//gmp_print_events(gmp_env, litmus_clock());
4940 +	
4941 +	ret = min(gmp_env->schedule_now, NR_CPUS);
4942 +	gmp_env->schedule_now = 0;
4943 +
4944 +	return ret;
4945 +}
4946 +
4947 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
4948 +{
4949 +	struct next_timer_event *event, *next;
4950 +
4951 +	TRACE("GLOBAL EVENTS now=%llu\n", now);
4952 +	list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
4953 +		TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
4954 +	}		
4955 +}
4956 +
4957 +static void gmp_res_change_state(
4958 +	struct reservation_environment* env,
4959 +	struct reservation *res,
4960 +	reservation_state_t new_state)
4961 +{
4962 +	struct gmp_reservation_environment* gmp_env;
4963 +
4964 +	gmp_env = container_of(env, struct gmp_reservation_environment, env);
4965 +
4966 +	TRACE("GMP reservation R%d state %d->%d at %llu\n",
4967 +		res->id, res->state, new_state, env->current_time);
4968 +
4969 +	list_del(&res->list);
4970 +	/* check if we need to reschedule because we lost an active reservation */
4971 +	if (res->state == RESERVATION_ACTIVE)
4972 +		gmp_env->schedule_now++;
4973 +	res->state = new_state;
4974 +	gmp_queue_reservation(gmp_env, res);
4975 +}
4976 +
4977 +void gmp_init(struct gmp_reservation_environment* gmp_env)
4978 +{
4979 +	memset(gmp_env, 0, sizeof(*gmp_env));
4980 +
4981 +	INIT_LIST_HEAD(&gmp_env->active_reservations);
4982 +	INIT_LIST_HEAD(&gmp_env->depleted_reservations);
4983 +	INIT_LIST_HEAD(&gmp_env->inactive_reservations);
4984 +	INIT_LIST_HEAD(&gmp_env->next_events);
4985 +
4986 +	gmp_env->env.change_state = gmp_res_change_state;
4987 +
4988 +	gmp_env->schedule_now = 0;
4989 +	gmp_env->will_schedule = false;
4990 +	
4991 +	raw_spin_lock_init(&gmp_env->lock);
4992 +}
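
The sup_* environment above is consumed by the per-CPU half of the MC2 plugin that follows; the typical call pattern from a scheduling decision looks roughly like the sketch below (locking, the global level-C environment, and timer re-arming are omitted; the real logic lives in mc2_schedule() and mc2_update_timer_and_unlock() in litmus/sched_mc2.c).

    /* Sketch of how a plugin drives the uniprocessor environment above. */
    static struct task_struct *pick_next(struct sup_reservation_environment *sup_env)
    {
        struct task_struct *next;

        /* 1. charge/replenish budgets for the time that has passed */
        sup_update_time(sup_env, litmus_clock());

        /* 2. ask the highest-priority ACTIVE reservation for a client */
        next = sup_dispatch(sup_env);

        /* 3. sup_env->next_scheduler_update now tells the caller when the
         *    per-CPU scheduler timer has to fire next */
        return next;
    }
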
4993 diff --git litmus/sched_mc2.c litmus/sched_mc2.c
4994 new file mode 100644
4995 index 0000000..a2abda8
4996 --- /dev/null
4997 +++ litmus/sched_mc2.c
4998 @@ -0,0 +1,1919 @@
4999 +/*
5000 + * litmus/sched_mc2.c
5001 + *
5002 + * Implementation of the Mixed-Criticality on MultiCore scheduler
5003 + *
5004 + * This plugin implements the scheduling algorithm proposed in the
5005 + * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper.
5006 + */ 
5007 + 
5008 +#include <linux/percpu.h>
5009 +#include <linux/slab.h>
5010 +#include <asm/uaccess.h>
5011 +
5012 +#include <litmus/sched_plugin.h>
5013 +#include <litmus/preempt.h>
5014 +#include <litmus/debug_trace.h>
5015 +
5016 +#include <litmus/litmus.h>
5017 +#include <litmus/jobs.h>
5018 +#include <litmus/budget.h>
5019 +#include <litmus/litmus_proc.h>
5020 +#include <litmus/sched_trace.h>
5021 +#include <litmus/cache_proc.h>
5022 +#include <litmus/trace.h>
5023 +
5024 +#include <litmus/mc2_common.h>
5025 +#include <litmus/reservation.h>
5026 +#include <litmus/polling_reservations.h>
5027 +
5028 +#ifdef CONFIG_PGMRT_SUPPORT
5029 +#include <litmus/pgm.h>
5030 +#endif
5031 +
5032 +//#define TRACE(fmt, args...) do {} while (false)
5033 +//#define TRACE_TASK(fmt, args...) do {} while (false)
5034 +
5035 +#define BUDGET_ENFORCEMENT_AT_C 0
5036 +
5037 +extern void do_partition(enum crit_level lv, int cpu);
5038 +
5039 +/* _global_env - reservation container for level-C tasks*/
5040 +struct gmp_reservation_environment _global_env;
5041 +
5042 +/* cpu_entry - keep track of a running task on a cpu
5043 + * This state is used to decide the lowest priority cpu
5044 + */
5045 +struct cpu_entry {
5046 +	struct task_struct *scheduled;
5047 +	lt_t deadline;
5048 +	int cpu;
5049 +	enum crit_level lv;
5050 +	/* if will_schedule is true, this cpu has already been selected and will
5051 +	   call mc2_schedule() soon. */
5052 +	bool will_schedule;
5053 +};
5054 +
5055 +/* cpu_priority - a global state for choosing the lowest priority CPU */
5056 +struct cpu_priority {
5057 +	raw_spinlock_t lock;
5058 +	struct cpu_entry cpu_entries[NR_CPUS];
5059 +};
5060 +
5061 +struct cpu_priority _lowest_prio_cpu;
5062 +	
5063 +/* mc2_task_state - a task state structure */
5064 +struct mc2_task_state {
5065 +	struct task_client res_info;
5066 +	/* if cpu == -1, this task is a global task (level C) */
5067 +	int cpu;
5068 +	bool has_departed;
5069 +	struct mc2_task mc2_param;
5070 +};
5071 +
5072 +/* crit_entry - maintain the logically running job (ghost job) */
5073 +struct crit_entry {
5074 +	enum crit_level level;
5075 +	struct task_struct *running;
5076 +};
5077 +
5078 +/* mc2_cpu_state - maintain the scheduled state and ghost jobs
5079 + * timer : timer for partitioned tasks (level A and B)
5080 + * g_timer : timer for global tasks (level C)
5081 + */
5082 +struct mc2_cpu_state {
5083 +	raw_spinlock_t lock;
5084 +
5085 +	struct sup_reservation_environment sup_env;
5086 +	struct hrtimer timer;
5087 +
5088 +	int cpu;
5089 +	struct task_struct* scheduled;
5090 +	struct crit_entry crit_entries[NUM_CRIT_LEVELS];
5091 +};
5092 +
5093 +static int resched_cpu[NR_CPUS];
5094 +static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
5095 +static int level_a_priorities[NR_CPUS];
5096 +
5097 +#define cpu_state_for(cpu_id)	(&per_cpu(mc2_cpu_state, cpu_id))
5098 +#define local_cpu_state()	(this_cpu_ptr(&mc2_cpu_state))
5099 +
5100 +/* get_mc2_state - get the task's state */
5101 +static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
5102 +{
5103 +	struct mc2_task_state* tinfo;
5104 +	
5105 +	tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state;
5106 +	
5107 +	if (tinfo)
5108 +		return tinfo;
5109 +	else
5110 +		return NULL;
5111 +}
5112 +
5113 +/* get_task_crit_level - return the criticality level of a task */
5114 +static enum crit_level get_task_crit_level(struct task_struct *tsk)
5115 +{
5116 +	struct mc2_task *mp;
5117 +	
5118 +	if (!tsk || !is_realtime(tsk))
5119 +		return NUM_CRIT_LEVELS;
5120 +	
5121 +	mp = tsk_rt(tsk)->mc2_data;
5122 +	
5123 +	if (!mp)
5124 +		return NUM_CRIT_LEVELS;
5125 +	else
5126 +		return mp->crit;
5127 +}
5128 +
5129 +/* task_departs - remove a task from its reservation
5130 + *                As of the 9/18/2015 fix below, any remaining budget is
5131 + *                discarded rather than converted to a ghost job, so
5132 + *                crit_entries[] is no longer updated here.
5133 + * @job_complete	indicates whether the job has completed
5134 + */
5135 +static void task_departs(struct task_struct *tsk, int job_complete)
5136 +{
5137 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
5138 +	//struct mc2_cpu_state* state = local_cpu_state();
5139 +	struct reservation* res = NULL;
5140 +	struct reservation_client *client = NULL;
5141 +
5142 +	BUG_ON(!is_realtime(tsk));
5143 +	
5144 +	res    = tinfo->res_info.client.reservation;
5145 +	client = &tinfo->res_info.client;
5146 +	BUG_ON(!res);
5147 +	BUG_ON(!client);
5148 +
5149 +/* 9/18/2015 fix start - no ghost job handling, empty remaining budget */
5150 +	if (job_complete) {
5151 +		res->cur_budget = 0;
5152 +		sched_trace_task_completion(tsk, 0);
5153 +	}
5154 +/* fix end */
5155 +
5156 +	res->ops->client_departs(res, client, job_complete);
5157 +	tinfo->has_departed = true;
5158 +	TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
5159 +/* 9/18/2015 fix start - no remaining budget 
5160 + *	
5161 +	if (job_complete && res->cur_budget) {
5162 +		struct crit_entry* ce;
5163 +		enum crit_level lv = tinfo->mc2_param.crit;
5164 +		
5165 +		ce = &state->crit_entries[lv];
5166 +		ce->running = tsk;
5167 +		res->is_ghost = state->cpu;
5168 +#if BUDGET_ENFORCEMENT_AT_C		
5169 +		gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
5170 +#endif
5171 +		TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
5172 + 	}
5173 + * fix -end
5174 + */ 
5175 +
5176 +}
5177 +
5178 +/* task_arrives - put a task into its reservation
5179 + *               If the job was a ghost job, remove it from crit_entries[]
5180 + */
5181 +static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
5182 +{
5183 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
5184 +	struct reservation* res;
5185 +	struct reservation_client *client;
5186 +	enum crit_level lv = get_task_crit_level(tsk);
5187 +
5188 +	res    = tinfo->res_info.client.reservation;
5189 +	client = &tinfo->res_info.client;
5190 +
5191 +	tinfo->has_departed = false;
5192 +
5193 +	switch(lv) {
5194 +		case CRIT_LEVEL_A:
5195 +		case CRIT_LEVEL_B:
5196 +			TS_RELEASE_START;
5197 +			break;
5198 +		case CRIT_LEVEL_C:
5199 +			TS_RELEASE_C_START;
5200 +			break;
5201 +		default:
5202 +			break;
5203 +	}
5204 +	
5205 +	res->ops->client_arrives(res, client);
5206 +	TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
5207 +	
5208 +	if (lv != NUM_CRIT_LEVELS) {
5209 +		struct crit_entry *ce;
5210 +		ce = &state->crit_entries[lv];
5211 +		/* if the current task is a ghost job, remove it */
5212 +		if (ce->running == tsk)
5213 +			ce->running = NULL;
5214 +	}
5215 +	/* do we need this??
5216 +	if (resched_cpu[state->cpu]) 
5217 +		litmus_reschedule(state->cpu);
5218 +	*/
5219 +	
5220 +	switch(lv) {
5221 +		case CRIT_LEVEL_A:
5222 +		case CRIT_LEVEL_B:
5223 +			TS_RELEASE_END;
5224 +			break;
5225 +		case CRIT_LEVEL_C:
5226 +			TS_RELEASE_C_END;
5227 +			break;
5228 +		default:
5229 +			break;
5230 +	}	
5231 +}
5232 +
5233 +/* get_lowest_prio_cpu - return the lowest priority cpu
5234 + *                       This will be used for scheduling level-C tasks.
5235 + *                       If all CPUs are running tasks that have
5236 + *                       higher priority than level C, return NO_CPU.
5237 + */
5238 +static int get_lowest_prio_cpu(lt_t priority)
5239 +{
5240 +	struct cpu_entry *ce;
5241 +	int cpu, ret = NO_CPU;
5242 +	lt_t latest_deadline = 0;
5243 +	
5244 +	//raw_spin_lock(&_lowest_prio_cpu.lock);
5245 +	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
5246 +	if (!ce->will_schedule && !ce->scheduled) {
5247 +		//raw_spin_unlock(&_lowest_prio_cpu.lock);
5248 +		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
5249 +		return ce->cpu;
5250 +	} else {
5251 +		TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
5252 +	}
5253 +
5254 +	for_each_online_cpu(cpu) {
5255 +		ce = &_lowest_prio_cpu.cpu_entries[cpu];
5256 +		/* If a CPU will call schedule() in the near future, we don't
5257 +		   return that CPU. */
5258 +		TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
5259 +	      ce->scheduled ? (ce->scheduled)->comm : "null",
5260 +	      ce->scheduled ? (ce->scheduled)->pid : 0,
5261 +	      ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
5262 +		if (!ce->will_schedule) {
5263 +			if (!ce->scheduled) {
5264 +				/* Idle cpu, return this. */
5265 +				//raw_spin_unlock(&_lowest_prio_cpu.lock);
5266 +				TRACE("CPU %d is the lowest!\n", ce->cpu);
5267 +				return ce->cpu;
5268 +			} else if (ce->lv == CRIT_LEVEL_C && 
5269 +			           ce->deadline > latest_deadline) {
5270 +				latest_deadline = ce->deadline;
5271 +				ret = ce->cpu;
5272 +			}
5273 +		}
5274 +	}		
5275 +	
5276 +	//raw_spin_unlock(&_lowest_prio_cpu.lock);
5277 +
5278 +	if (priority >= latest_deadline)
5279 +		ret = NO_CPU;
5280 +	
5281 +	TRACE("CPU %d is the lowest!\n", ret);
5282 +
5283 +	return ret;
5284 +}
5285 +
5286 +/* mc2_update_time - update time for a given criticality level. 
5287 + *                   caller must hold a proper lock
5288 + *                   (cpu_state lock or global lock)
5289 + */
5290 +/* 9/24/2015 temporarily unused
5291 +static void mc2_update_time(enum crit_level lv, 
5292 +                            struct mc2_cpu_state *state, lt_t time)
5293 +{
5294 +	int global_schedule_now;
5295 +	
5296 +	if (lv < CRIT_LEVEL_C)
5297 +		sup_update_time(&state->sup_env, time);
5298 +	else if (lv == CRIT_LEVEL_C) {
5299 +		global_schedule_now = gmp_update_time(&_global_env, time);
5300 +		while (global_schedule_now--) {
5301 +			int cpu = get_lowest_prio_cpu(0);
5302 +			if (cpu != NO_CPU) {
5303 +				raw_spin_lock(&_lowest_prio_cpu.lock);
5304 +				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5305 +				raw_spin_unlock(&_lowest_prio_cpu.lock);
5306 +				TRACE("LOWEST CPU = P%d\n", cpu);
5307 +				litmus_reschedule(cpu);
5308 +			}
5309 +		} 
5310 +	}
5311 +	else
5312 +		TRACE("update_time(): Criticality level error!!!!\n");
5313 +}
5314 +*/
5315 +
5316 +/* NOTE: drops state->lock */
5317 +/* mc2_update_timer_and_unlock - set a timer and g_timer and unlock 
5318 + *                               Whenever res_env.current_time is updated,
5319 + *                               we check next_scheduler_update and set 
5320 + *                               a timer.
5321 + *                               If there exists a global event that is
5322 + *                               not armed on any CPU and g_timer is not
5323 + *                               active, set a g_timer for that event.
5324 + */
5325 +static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
5326 +{
5327 +	int local, cpus;
5328 +	lt_t update, now;
5329 +	//enum crit_level lv = get_task_crit_level(state->scheduled);
5330 +	struct next_timer_event *event, *next;
5331 +	int reschedule[NR_CPUS];
5332 +	
5333 +	for (cpus = 0; cpus<NR_CPUS; cpus++)
5334 +		reschedule[cpus] = 0;
5335 +	
5336 +	update = state->sup_env.next_scheduler_update;
5337 +	now = state->sup_env.env.current_time;
5338 +
5339 +	/* Be sure we're actually running on the right core,
5340 +	 * as pres_update_timer() is also called from pres_task_resume(),
5341 +	 * which might be called on any CPU when a thread resumes.
5342 +	 */
5343 +	local = local_cpu_state() == state;
5344 +
5345 +	raw_spin_lock(&_global_env.lock);
5346 +		
5347 +	list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
5348 +		/* If the event time has already passed, we call schedule() on
5349 +		   the lowest-priority CPU. */
5350 +		if (event->next_update >= update) {
5351 +			break;
5352 +		}
5353 +		
5354 +		if (event->next_update < litmus_clock()) {
5355 +			if (event->timer_armed_on == NO_CPU) {
5356 +				struct reservation *res = gmp_find_by_id(&_global_env, event->id);
5357 +				int cpu = get_lowest_prio_cpu(res?res->priority:0);
5358 +				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
5359 +				list_del(&event->list);
5360 +				kfree(event);
5361 +				if (cpu != NO_CPU) {
5362 +					//raw_spin_lock(&_lowest_prio_cpu.lock);
5363 +					_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5364 +					//raw_spin_unlock(&_lowest_prio_cpu.lock);
5365 +					if (cpu == local_cpu_state()->cpu)
5366 +						litmus_reschedule_local();
5367 +					else
5368 +						reschedule[cpu] = 1;
5369 +				}
5370 +			}
5371 +		} else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) {
5372 +			event->timer_armed_on = state->cpu;
5373 +			update = event->next_update;
5374 +			break;
5375 +		}
5376 +	}
5377 +	
5378 +	/* Must drop state lock before calling into hrtimer_start(), which
5379 +	 * may raise a softirq, which in turn may wake ksoftirqd. */
5380 +	raw_spin_unlock(&_global_env.lock);
5381 +	raw_spin_unlock(&state->lock);
5382 +		
5383 +	if (update <= now || reschedule[state->cpu]) {
5384 +		reschedule[state->cpu] = 0;
5385 +		litmus_reschedule(state->cpu);
5386 +		/*
5387 +		raw_spin_lock(&state->lock);
5388 +		preempt_if_preemptable(state->scheduled, state->cpu);
5389 +		raw_spin_unlock(&state->lock);
5390 +		*/
5391 +	} else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
5392 +		/* Reprogram only if not already set correctly. */
5393 +		if (!hrtimer_active(&state->timer) ||
5394 +		    ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
5395 +			TRACE("canceling timer...at %llu\n", 
5396 +			      ktime_to_ns(hrtimer_get_expires(&state->timer)));
5397 +			hrtimer_cancel(&state->timer);
5398 +			TRACE("setting scheduler timer for %llu\n", update);
5399 +			/* We cannot use hrtimer_start() here because the
5400 +			 * wakeup flag must be set to zero. */
5401 +			__hrtimer_start_range_ns(&state->timer,
5402 +					ns_to_ktime(update),
5403 +					0 /* timer coalescing slack */,
5404 +					HRTIMER_MODE_ABS_PINNED,
5405 +					0 /* wakeup */);
5406 +			if (update < litmus_clock()) {
5407 +				/* uh oh, timer expired while trying to set it */
5408 +				TRACE("timer expired during setting "
5409 +				      "update:%llu now:%llu actual:%llu\n",
5410 +				      update, now, litmus_clock());
5411 +	 			/* The timer HW may not have been reprogrammed
5412 +	 			 * correctly; force rescheduling now. */
5413 +				litmus_reschedule(state->cpu);
5414 +			}
5415 +		}
5416 +	} else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
5417 +		/* Poke remote core only if timer needs to be set earlier than
5418 +		 * it is currently set.
5419 +		 */
5420 +		TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
5421 +		      "active:%d, set:%llu)\n",
5422 +			state->cpu,
5423 +			update,
5424 +			hrtimer_active(&state->timer),
5425 +			ktime_to_ns(hrtimer_get_expires(&state->timer)));
5426 +		if (!hrtimer_active(&state->timer) ||
5427 +		    ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
5428 +			TRACE("poking CPU %d so that it can update its "
5429 +			       "scheduling timer (active:%d, set:%llu)\n",
5430 +			       state->cpu,
5431 +			       hrtimer_active(&state->timer),
5432 +			       ktime_to_ns(hrtimer_get_expires(&state->timer)));
5433 +			//litmus_reschedule(state->cpu);
5434 +			raw_spin_lock(&state->lock);
5435 +			preempt_if_preemptable(state->scheduled, state->cpu);
5436 +			raw_spin_unlock(&state->lock);
5437 +			reschedule[state->cpu] = 0;
5438 +		}
5439 +	}
5440 +	for (cpus = 0; cpus<NR_CPUS; cpus++) {
5441 +		if (reschedule[cpus]) {
5442 +			litmus_reschedule(cpus);
5443 +			/*
5444 +			struct mc2_cpu_state *remote_state;
5445 +			remote_state = cpu_state_for(cpus);
5446 +			raw_spin_lock(&remote_state->lock);
5447 +			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
5448 +			raw_spin_unlock(&remote_state->lock);
5449 +			*/
5450 +		}
5451 +	}
5452 +}
5453 +
5454 +/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
5455 + *                          If the budget of a ghost is exhausted,
5456 + *                          clear is_ghost and reschedule
5457 + */
5458 +/*
5459 +static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state)
5460 +{
5461 +	int lv = 0;
5462 +	struct crit_entry* ce;
5463 +	struct reservation *res;
5464 +	struct mc2_task_state *tinfo;
5465 +	lt_t ret = ULLONG_MAX;
5466 +	
5467 +	BUG_ON(!state);
5468 +	
5469 +	for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
5470 +		ce = &state->crit_entries[lv];
5471 +		if (ce->running != NULL) {
5472 +//printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu,  ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0);
5473 +			tinfo = get_mc2_state(ce->running);
5474 +			if (!tinfo)
5475 +				continue;
5476 +			
5477 +			res = res_find_by_id(state, tinfo->mc2_param.res_id);
5478 +			//BUG_ON(!res);
5479 +			if (!res) {
5480 +				printk(KERN_ALERT "mc2_update_ghost_state(): R%d not found!\n", tinfo->mc2_param.res_id);			
5481 +				return 0;
5482 +			}
5483 +			
5484 +			TRACE("LV %d running id %d budget %llu\n", 
5485 +			       lv, tinfo->mc2_param.res_id, res->cur_budget);
5486 +			// If the budget is exhausted, clear is_ghost and reschedule 
5487 +			if (!res->cur_budget) {
5488 +				struct sup_reservation_environment* sup_env = &state->sup_env;
5489 +				
5490 +				TRACE("GHOST FINISH id %d at %llu\n", 
5491 +				      tinfo->mc2_param.res_id, litmus_clock());
5492 +				ce->running = NULL;
5493 +				res->is_ghost = NO_CPU;
5494 +				
5495 +				if (lv < CRIT_LEVEL_C) {
5496 +					res = list_first_entry_or_null(
5497 +					      &sup_env->active_reservations, 
5498 +						  struct reservation, list);
5499 +					if (res)
5500 +						litmus_reschedule_local();
5501 +				} else if (lv == CRIT_LEVEL_C) {
5502 +					res = list_first_entry_or_null(
5503 +					      &_global_env.active_reservations,
5504 +						  struct reservation, list);
5505 +					if (res)
5506 +						litmus_reschedule(state->cpu);
5507 +				}
5508 +			} else {
5509 +				//TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
5510 +				//gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
5511 +				if (ret > res->cur_budget) {
5512 +					ret = res->cur_budget;
5513 +				}
5514 +			}
5515 +		}
5516 +	}
5517 +	
5518 +	return ret;
5519 +}			
5520 +*/
5521 +
5522 +/* update_cpu_prio - Update cpu's priority
5523 + *                   When a cpu picks a new task, call this function
5524 + *                   to update cpu priorities.
5525 + */
5526 +static void update_cpu_prio(struct mc2_cpu_state *state)
5527 +{
5528 +	struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
5529 +	enum crit_level lv = get_task_crit_level(state->scheduled);
5530 +	
5531 +	if (!state->scheduled) {
5532 +		/* cpu is idle. */
5533 +		ce->scheduled = NULL;
5534 +		ce->deadline = ULLONG_MAX;
5535 +		ce->lv = NUM_CRIT_LEVELS;
5536 +	} else if (lv == CRIT_LEVEL_C) {
5537 +		ce->scheduled = state->scheduled;
5538 +		ce->deadline = get_deadline(state->scheduled);
5539 +		ce->lv = lv;
5540 +	} else if (lv < CRIT_LEVEL_C) {
5541 +		/* If cpu is running level A or B tasks, it is not eligible
5542 +		   to run level-C tasks */
5543 +		ce->scheduled = state->scheduled;
5544 +		ce->deadline = 0;
5545 +		ce->lv = lv;
5546 +	}
5547 +}
5548 +
5549 +/* on_scheduling_timer - timer event for partitioned tasks
5550 + */                       
5551 +static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
5552 +{
5553 +	unsigned long flags;
5554 +	enum hrtimer_restart restart = HRTIMER_NORESTART;
5555 +	struct mc2_cpu_state *state;
5556 +	lt_t update, now;
5557 +	int global_schedule_now;
5558 +	//lt_t remain_budget; // no ghost jobs
5559 +	int reschedule[NR_CPUS];
5560 +	int cpus;
5561 +	
5562 +	for (cpus = 0; cpus<NR_CPUS; cpus++)
5563 +		reschedule[cpus] = 0;
5564 +	
5565 +	state = container_of(timer, struct mc2_cpu_state, timer);
5566 +
5567 +	/* The scheduling timer should only fire on the local CPU, because
5568 +	 * otherwise deadlocks via timer_cancel() are possible.
5569 +	 * Note: this does not interfere with dedicated interrupt handling, as
5570 +	 * even under dedicated interrupt handling scheduling timers for
5571 +	 * budget enforcement must occur locally on each CPU.
5572 +	 */
5573 +	BUG_ON(state->cpu != raw_smp_processor_id());
5574 +
5575 +	TS_ISR_START;
5576 +	
5577 +	TRACE("Timer fired at %llu\n", litmus_clock());
5578 +	//raw_spin_lock_irqsave(&_global_env.lock, flags);
5579 +	raw_spin_lock_irqsave(&state->lock, flags);
5580 +	now = litmus_clock();
5581 +	sup_update_time(&state->sup_env, now);
5582 +
5583 +/* 9/20/2015 fix - no ghost job 	
5584 +	remain_budget = mc2_update_ghost_state(state);
5585 +*/	
5586 +	update = state->sup_env.next_scheduler_update;
5587 +	now = state->sup_env.env.current_time;
5588 +
5589 +/* 9/20/2015 fix - no ghost job 
5590 +	if (remain_budget != ULLONG_MAX && update > now + remain_budget) {
5591 +		update = now + remain_budget;
5592 +	}
5593 +	
5594 +	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d remain_budget:%llu\n", now, update, state->cpu, global_schedule_now, remain_budget);
5595 +*/	
5596 +
5597 +	if (update <= now) {
5598 +		litmus_reschedule_local();
5599 +	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
5600 +		hrtimer_set_expires(timer, ns_to_ktime(update));
5601 +		restart = HRTIMER_RESTART;
5602 +	}
5603 +
5604 +	raw_spin_lock(&_global_env.lock);
5605 +	global_schedule_now = gmp_update_time(&_global_env, now);
5606 +	
5607 +	BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
5608 +	
5609 +	/* Find the lowest cpu, and call reschedule */
5610 +	while (global_schedule_now--) {
5611 +		int cpu = get_lowest_prio_cpu(0);
5612 +		if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
5613 +			//raw_spin_lock(&_lowest_prio_cpu.lock);
5614 +			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5615 +			//raw_spin_unlock(&_lowest_prio_cpu.lock);
5616 +			TRACE("LOWEST CPU = P%d\n", cpu);
5617 +			if (cpu == state->cpu && update > now)
5618 +				litmus_reschedule_local();
5619 +			else
5620 +				reschedule[cpu] = 1;
5621 +		}
5622 +	} 
5623 +	raw_spin_unlock(&_global_env.lock);
5624 +	
5625 +	raw_spin_unlock_irqrestore(&state->lock, flags);
5626 +	//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
5627 +	
5628 +	TS_ISR_END;
5629 +	
5630 +	for (cpus = 0; cpus<NR_CPUS; cpus++) {
5631 +		if (reschedule[cpus]) {
5632 +			litmus_reschedule(cpus);
5633 +			/*
5634 +			struct mc2_cpu_state *remote_state;
5635 +
5636 +			remote_state = cpu_state_for(cpus);
5637 +			raw_spin_lock(&remote_state->lock);
5638 +			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
5639 +			raw_spin_unlock(&remote_state->lock);
5640 +			*/
5641 +		}
5642 +	}
5643 +	
5644 +	
5645 +	return restart;
5646 +}
5647 +
5648 +/* mc2_complete_job - syscall backend for job completions
5649 + */
5650 +static long mc2_complete_job(void)
5651 +{
5652 +	ktime_t next_release;
5653 +	long err;
5654 +
5655 +	tsk_rt(current)->completed = 1;
5656 +	
5657 +	/* If this is the first job instance, we need to reset the
5658 +	   replenishment time to the next release time. */
5659 +	if (tsk_rt(current)->sporadic_release) {
5660 +		struct mc2_cpu_state *state;
5661 +		struct reservation_environment *env;
5662 +		struct mc2_task_state *tinfo;
5663 +		struct reservation *res = NULL;
5664 +		unsigned long flags;
5665 +		enum crit_level lv;
5666 +
5667 +		preempt_disable();
5668 +		local_irq_save(flags);
5669 +		
5670 +		tinfo = get_mc2_state(current);
5671 +		lv = get_task_crit_level(current);
5672 +		
5673 +		if (lv < CRIT_LEVEL_C) {
5674 +			state = cpu_state_for(tinfo->cpu);
5675 +			raw_spin_lock(&state->lock);
5676 +			env = &(state->sup_env.env);
5677 +			res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
5678 +			env->time_zero = tsk_rt(current)->sporadic_release_time;
5679 +		}
5680 +		else if (lv == CRIT_LEVEL_C) {
5681 +			state = local_cpu_state();		
5682 +			raw_spin_lock(&state->lock);
5683 +			raw_spin_lock(&_global_env.lock);
5684 +			res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
5685 +			_global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
5686 +		}
5687 +		else
5688 +			BUG();
5689 +
5690 +		/* set next_replenishment to the synchronous release time */
5691 +		BUG_ON(!res);
5692 +		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
5693 +/*		
5694 +		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
5695 +			struct table_driven_reservation *tdres;
5696 +			tdres = container_of(res, struct table_driven_reservation, res);
5697 +			tdres->next_interval = 0;
5698 +			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
5699 +			res->next_replenishment += tdres->intervals[0].start;			
5700 +		}
5701 +*/		
5702 +		res->cur_budget = 0;
5703 +		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
5704 +		
5705 +		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
5706 +		
5707 +		//if (lv < CRIT_LEVEL_C)
5708 +//			raw_spin_unlock(&state->lock);
5709 +		//else 
5710 +		if (lv == CRIT_LEVEL_C)
5711 +			raw_spin_unlock(&_global_env.lock);
5712 +		
5713 +		raw_spin_unlock(&state->lock);
5714 +		local_irq_restore(flags);
5715 +		preempt_enable();
5716 +	}
5717 +	
5718 +	sched_trace_task_completion(current, 0);		
5719 +	/* update the next release time and deadline */
5720 +	prepare_for_next_period(current);
5721 +	sched_trace_task_release(current);
5722 +	next_release = ns_to_ktime(get_release(current));
5723 +	preempt_disable();
5724 +	TRACE_CUR("next_release=%llu\n", get_release(current));
5725 +	if (get_release(current) > litmus_clock()) {
5726 +		/* sleep until next_release */
5727 +		set_current_state(TASK_INTERRUPTIBLE);
5728 +		preempt_enable_no_resched();
5729 +		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
5730 +	} else {
5731 +		/* release the next job immediately */
5732 +		err = 0;
5733 +		TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
5734 +		preempt_enable();
5735 +	}
5736 +
5737 +	TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
5738 +
5739 +	tsk_rt(current)->completed = 0;
5740 +	return err;
5741 +}
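/* A minimal userspace sketch (assumptions flagged below) of the job loop that
 * reaches mc2_complete_job() through the complete_job plugin callback. The
 * liblitmus wrapper names init_litmus() and sleep_next_period() are assumed
 * here; any call path that invokes the complete_job backend behaves the same.
 *
 *	init_litmus();                  // assumed liblitmus setup call
 *	while (keep_running) {
 *		do_work_for_one_job();  // application-specific work (hypothetical)
 *		sleep_next_period();    // job ends; kernel runs mc2_complete_job(),
 *		                        // which sleeps until the next release
 *	}
 */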
5742 +
5743 +/* mc2_dispatch - Select the next task to schedule.
5744 + */
5745 +struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state)
5746 +{
5747 +	struct reservation *res, *next;
5748 +	struct task_struct *tsk = NULL;
5749 +	struct crit_entry *ce;
5750 +	enum crit_level lv;
5751 +	lt_t time_slice;
5752 +
5753 +	list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
5754 +		if (res->state == RESERVATION_ACTIVE) {
5755 +			tsk = res->ops->dispatch_client(res, &time_slice);
5756 +			if (likely(tsk)) {
5757 +				lv = get_task_crit_level(tsk);
5758 +				if (lv == NUM_CRIT_LEVELS) {
5759 +					sup_scheduler_update_after(sup_env, res->cur_budget);
5760 +					return tsk;
5761 +				} else {
5762 +					ce = &state->crit_entries[lv];
5763 +					sup_scheduler_update_after(sup_env, res->cur_budget);
5764 +					res->blocked_by_ghost = 0;
5765 +					res->is_ghost = NO_CPU;
5766 +					return tsk;
5767 +/* no ghost jobs
5768 +					if (likely(!ce->running)) {
5769 +						sup_scheduler_update_after(sup_env, res->cur_budget);
5770 +						res->blocked_by_ghost = 0;
5771 +						res->is_ghost = NO_CPU;
5772 +						return tsk;
5773 +					} else {
5774 +						res->blocked_by_ghost = 1;
5775 +						TRACE_TASK(ce->running, " is GHOST\n");
5776 +					}
5777 +*/
5778 +				}
5779 +			}
5780 +		}
5781 +	}
5782 +	
5783 +	return NULL;
5784 +}
5785 +
5786 +struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
5787 +{
5788 +	struct reservation *res, *next;
5789 +	struct task_struct *tsk = NULL;
5790 +	//struct crit_entry *ce;
5791 +	enum crit_level lv;
5792 +	lt_t time_slice;
5793 +	
5794 +	/* no eligible level-A or level-B task exists */
5795 +	/* check the ghost job */
5796 +	/*
5797 +	ce = &state->crit_entries[CRIT_LEVEL_C];
5798 +	if (ce->running) {
5799 +		TRACE_TASK(ce->running," is GHOST\n");
5800 +		return NULL;
5801 +	}
5802 +	*/
5803 +	list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
5804 +		BUG_ON(!res);
5805 +		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
5806 +			tsk = res->ops->dispatch_client(res, &time_slice);
5807 +			if (likely(tsk)) {
5808 +				lv = get_task_crit_level(tsk);
5809 +				if (lv == NUM_CRIT_LEVELS) {
5810 +#if BUDGET_ENFORCEMENT_AT_C			
5811 +					gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
5812 +#endif
5813 +					res->event_added = 1;
5814 +					res->blocked_by_ghost = 0;
5815 +					res->is_ghost = NO_CPU;
5816 +					res->scheduled_on = state->cpu;
5817 +					return tsk;
5818 +				} else if (lv == CRIT_LEVEL_C) {
5819 +					//ce = &state->crit_entries[lv];
5820 +					//if (likely(!ce->running)) {
5821 +#if BUDGET_ENFORCEMENT_AT_C
5822 +						gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
5823 +#endif
5824 +						res->event_added = 1;
5825 +						res->blocked_by_ghost = 0;
5826 +						res->is_ghost = NO_CPU;
5827 +						res->scheduled_on = state->cpu;
5828 +						return tsk;
5829 +					//} else {
5830 +					//	res->blocked_by_ghost = 1;
5831 +					//	TRACE_TASK(ce->running, " is GHOST\n");
5832 +					//	return NULL;
5833 +					//}
5834 +				} else {
5835 +					BUG();
5836 +				}
5837 +			}
5838 +		}
5839 +	}
5840 +	
5841 +	return NULL;
5842 +}
5843 +
5844 +static inline void pre_schedule(struct task_struct *prev, int cpu)
5845 +{
5846 +	TS_SCHED_A_START;
5847 +	TS_SCHED_C_START;
5848 +	
5849 +	if (!prev || !is_realtime(prev))
5850 +		return;
5851 +	
5852 +	do_partition(CRIT_LEVEL_C, cpu);
5853 +}
5854 +
5855 +static inline void post_schedule(struct task_struct *next, int cpu)
5856 +{
5857 +	enum crit_level lev;
5858 +	if ((!next) || !is_realtime(next))
5859 +		return;
5860 +
5861 +	lev = get_task_crit_level(next);
5862 +	do_partition(lev, cpu);
5863 +	
5864 +	switch(lev) {
5865 +		case CRIT_LEVEL_A:
5866 +		case CRIT_LEVEL_B:
5867 +			TS_SCHED_A_END(next);
5868 +			break;
5869 +		case CRIT_LEVEL_C:
5870 +			TS_SCHED_C_END(next);
5871 +			break;
5872 +		default:
5873 +			break;
5874 +	}
5875 +	
5876 +}
5877 +
5878 +/* mc2_schedule - main scheduler function. pick the next task to run
5879 + */
5880 +static struct task_struct* mc2_schedule(struct task_struct * prev)
5881 +{
5882 +	int np, blocks, exists, preempt, to_schedule = 0; /* only set on the global dispatch path */
5883 +	/* next == NULL means "schedule background work". */
5884 +	lt_t now;
5885 +	struct mc2_cpu_state *state = local_cpu_state();
5886 +	
5887 +	pre_schedule(prev, state->cpu);
5888 +	
5889 +	/* 9/20/2015 fix
5890 +	raw_spin_lock(&_global_env.lock);
5891 +	*/
5892 +	raw_spin_lock(&state->lock);
5893 +	
5894 +	//BUG_ON(state->scheduled && state->scheduled != prev);
5895 +	//BUG_ON(state->scheduled && !is_realtime(prev));
5896 +	if (state->scheduled && state->scheduled != prev)
5897 +		printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
5898 +	if (state->scheduled && !is_realtime(prev))
5899 +		printk(KERN_ALERT "BUG2!!!!!!!! \n");
5900 +
5901 +	/* (0) Determine state */
5902 +	exists = state->scheduled != NULL;
5903 +	blocks = exists && !is_current_running();
5904 +	np = exists && is_np(state->scheduled);
5905 +	
5906 +	raw_spin_lock(&_global_env.lock);
5907 +	preempt = resched_cpu[state->cpu];
5908 +	resched_cpu[state->cpu] = 0;
5909 +	raw_spin_unlock(&_global_env.lock);
5910 +
5911 +	/* update time */
5912 +	state->sup_env.will_schedule = true;
5913 +
5914 +	now = litmus_clock();
5915 +	sup_update_time(&state->sup_env, now);
5916 +	/* 9/20/2015 fix */
5917 +	//raw_spin_lock(&_global_env.lock);
5918 +	//to_schedule = gmp_update_time(&_global_env, now);
5919 +	//raw_spin_unlock(&_global_env.lock);
5920 +	
5921 +	/* 9/20/2015 fix 
5922 +	mc2_update_ghost_state(state);	
5923 +	*/
5924 +	
5925 +	/* remove task from reservation if it blocks */
5926 +	/*
5927 +	if (is_realtime(prev) && !is_running(prev)) {
5928 +		if (get_task_crit_level(prev) == CRIT_LEVEL_C)
5929 +			raw_spin_lock(&_global_env.lock);
5930 +		task_departs(prev, is_completed(prev));
5931 +		if (get_task_crit_level(prev) == CRIT_LEVEL_C)
5932 +			raw_spin_unlock(&_global_env.lock);
5933 +	}*/
5934 +	if (is_realtime(current) && blocks) {
5935 +		if (get_task_crit_level(current) == CRIT_LEVEL_C)
5936 +			raw_spin_lock(&_global_env.lock);
5937 +		task_departs(current, is_completed(current));
5938 +		if (get_task_crit_level(current) == CRIT_LEVEL_C)
5939 +			raw_spin_unlock(&_global_env.lock);
5940 +	}
5941 +	
5942 +	/* figure out what to schedule next */
5943 +	if (!np)
5944 +		state->scheduled = mc2_dispatch(&state->sup_env, state);
5945 +
5946 +	if (!state->scheduled) {
5947 +		raw_spin_lock(&_global_env.lock);
5948 +		to_schedule = gmp_update_time(&_global_env, now);
5949 +		state->scheduled = mc2_global_dispatch(state);
5950 +		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
5951 +		update_cpu_prio(state);
5952 +		raw_spin_unlock(&_global_env.lock);
5953 +	} else {
5954 +		raw_spin_lock(&_global_env.lock);
5955 +		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
5956 +		update_cpu_prio(state);
5957 +		raw_spin_unlock(&_global_env.lock);
5958 +	}
5959 +	
5960 +	//raw_spin_lock(&_lowest_prio_cpu.lock);
5961 +	//_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
5962 +	//update_cpu_prio(state);
5963 +	//raw_spin_unlock(&_lowest_prio_cpu.lock);
5964 +	
5965 +	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
5966 +	sched_state_task_picked();
5967 +
5968 +	/* program scheduler timer */
5969 +	state->sup_env.will_schedule = false;
5970 +		
5971 +	/* NOTE: drops state->lock */
5972 +	mc2_update_timer_and_unlock(state);
5973 +
5974 +	if (prev != state->scheduled && is_realtime(prev)) {
5975 +		struct mc2_task_state* tinfo = get_mc2_state(prev);
5976 +		struct reservation* res = tinfo->res_info.client.reservation;
5977 +		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
5978 +		res->scheduled_on = NO_CPU;
5979 +		TRACE_TASK(prev, "descheduled.\n");
5980 +		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
5981 +		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
5982 +			int cpu;
5983 +			raw_spin_lock(&_global_env.lock);
5984 +			cpu = get_lowest_prio_cpu(res?res->priority:0);
5985 +			TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
5986 +			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
5987 +				//raw_spin_lock(&_lowest_prio_cpu.lock);
5988 +				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5989 +				resched_cpu[cpu] = 1;
5990 +				//raw_spin_unlock(&_lowest_prio_cpu.lock);
5991 +			}
5992 +			raw_spin_unlock(&_global_env.lock);
5993 +		}
5994 +	}
5995 +	
5996 +	if (to_schedule != 0) {
5997 +		raw_spin_lock(&_global_env.lock);
5998 +		while (to_schedule--) {
5999 +			int cpu = get_lowest_prio_cpu(0);
6000 +			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
6001 +				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
6002 +				resched_cpu[cpu] = 1;
6003 +			}
6004 +		}
6005 +		raw_spin_unlock(&_global_env.lock);	
6006 +	}
6007 +
6008 +	if (state->scheduled) {
6009 +		TRACE_TASK(state->scheduled, "scheduled.\n");
6010 +	}
6011 +	
6012 +	post_schedule(state->scheduled, state->cpu);
6013 +	
6014 +	return state->scheduled;
6015 +}
6016 +
6017 +static void resume_legacy_task_model_updates(struct task_struct *tsk)
6018 +{
6019 +	lt_t now;
6020 +	if (is_sporadic(tsk)) {
6021 +		/* If this sporadic task was gone for a "long" time and woke up past
6022 +		 * its deadline, then give it a new budget by triggering a job
6023 +		 * release. This is purely cosmetic and has no effect on the
6024 +		 * MC2 scheduler. */
6025 +
6026 +		now = litmus_clock();
6027 +		if (is_tardy(tsk, now)) {
6028 +			//release_at(tsk, now);
6029 +			//sched_trace_task_release(tsk);
6030 +		}
6031 +	}
6032 +}
6033 +
6034 +/* mc2_task_resume - Called when the state of tsk changes back to 
6035 + *                   TASK_RUNNING. We need to requeue the task.
6036 + */
6037 +static void mc2_task_resume(struct task_struct  *tsk)
6038 +{
6039 +	unsigned long flags;
6040 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
6041 +	struct mc2_cpu_state *state;
6042 +
6043 +	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
6044 +
6045 +	local_irq_save(flags);
6046 +	if (tinfo->cpu != -1)
6047 +		state = cpu_state_for(tinfo->cpu);
6048 +	else
6049 +		state = local_cpu_state();
6050 +
6051 +	/* 9/20/2015 fix
6052 +	raw_spin_lock(&_global_env.lock);
6053 +	*/
6054 +	/* Requeue only if self-suspension was already processed. */
6055 +	if (tinfo->has_departed)
6056 +	{
6057 +		/* We don't want to consider jobs before synchronous releases */
6058 +		if (tsk_rt(tsk)->job_params.job_no > 5) {
6059 +			switch(get_task_crit_level(tsk)) {
6060 +				case CRIT_LEVEL_A:
6061 +					TS_RELEASE_LATENCY_A(get_release(tsk));
6062 +					break;
6063 +				case CRIT_LEVEL_B:
6064 +					TS_RELEASE_LATENCY_B(get_release(tsk));
6065 +					break;
6066 +				case CRIT_LEVEL_C:
6067 +					TS_RELEASE_LATENCY_C(get_release(tsk));
6068 +					break;
6069 +				default:
6070 +					break;
6071 +			}
6072 +		}
6073 +
6074 +		raw_spin_lock(&state->lock);
6075 +		/* Assumption: litmus_clock() is synchronized across cores,
6076 +		 * since we might not actually be executing on tinfo->cpu
6077 +		 * at the moment. */
6078 +		if (tinfo->cpu != -1) {
6079 +			sup_update_time(&state->sup_env, litmus_clock());
6080 +			task_arrives(state, tsk);
6081 +		} else {
6082 +			raw_spin_lock(&_global_env.lock);
6083 +			gmp_update_time(&_global_env, litmus_clock());
6084 +			task_arrives(state, tsk);
6085 +			raw_spin_unlock(&_global_env.lock);
6086 +		}
6087 +			
6088 +		/* 9/20/2015 fix 
6089 +		mc2_update_ghost_state(state);
6090 +		*/
6091 +		//task_arrives(state, tsk);
6092 +		/* NOTE: drops state->lock */
6093 +		TRACE_TASK(tsk, "mc2_resume()\n");
6094 +		mc2_update_timer_and_unlock(state);	
6095 +	} else {
6096 +		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
6097 +		//raw_spin_unlock(&_global_env.lock);
6098 +	}
6099 +
6100 +	local_irq_restore(flags);
6101 +	
6102 +	//gmp_free_passed_event();
6103 +	resume_legacy_task_model_updates(tsk);
6104 +}
6105 +
6106 +
6107 +/* mc2_admit_task - Set up MC2 task parameters
6108 + */
6109 +static long mc2_admit_task(struct task_struct *tsk)
6110 +{
6111 +	long err = -ESRCH;
6112 +	unsigned long flags;
6113 +	struct reservation *res;
6114 +	struct mc2_cpu_state *state;
6115 +	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
6116 +	struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
6117 +	enum crit_level lv;
6118 +	
6119 +	if (!tinfo)
6120 +		return -ENOMEM;
6121 +
6122 +	if (!mp) {
6123 +		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
6124 +		kfree(tinfo); return err;
6125 +	}
6126 +	
6127 +	lv = mp->crit;
6128 +	preempt_disable();
6129 +
6130 +	if (lv < CRIT_LEVEL_C) {
6131 +		state = cpu_state_for(task_cpu(tsk));
6132 +		raw_spin_lock_irqsave(&state->lock, flags);
6133 +
6134 +		res = sup_find_by_id(&state->sup_env, mp->res_id);
6135 +
6136 +		/* found the appropriate reservation */
6137 +		if (res) {
6138 +			TRACE_TASK(tsk, "SUP FOUND RES ID\n");
6139 +			tinfo->mc2_param.crit = mp->crit;
6140 +			tinfo->mc2_param.res_id = mp->res_id;
6141 +		
6142 +			/* initial values */
6143 +			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
6144 +			tinfo->cpu = task_cpu(tsk);
6145 +			tinfo->has_departed = true;
6146 +			tsk_rt(tsk)->plugin_state = tinfo;
6147 +
6148 +			/* disable LITMUS^RT's per-thread budget enforcement */
6149 +			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
6150 +		}
6151 +
6152 +		raw_spin_unlock_irqrestore(&state->lock, flags);
6153 +	} else if (lv == CRIT_LEVEL_C) {
6154 +		state = local_cpu_state();
6155 +		raw_spin_lock_irqsave(&state->lock, flags);
6156 +		raw_spin_lock(&_global_env.lock);
6157 +		//state = local_cpu_state();
6158 +		
6159 +		//raw_spin_lock(&state->lock);
6160 +		
6161 +		res = gmp_find_by_id(&_global_env, mp->res_id);
6162 +
6163 +		/* found the appropriate reservation (or vCPU) */
6164 +		if (res) {
6165 +			TRACE_TASK(tsk, "GMP FOUND RES ID\n");
6166 +			tinfo->mc2_param.crit = mp->crit;
6167 +			tinfo->mc2_param.res_id = mp->res_id;
6168 +			
6169 +			/* initial values */
6170 +			err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
6171 +			tinfo->cpu = -1;
6172 +			tinfo->has_departed = true;
6173 +			tsk_rt(tsk)->plugin_state = tinfo;
6174 +
6175 +			/* disable LITMUS^RT's per-thread budget enforcement */
6176 +			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
6177 +		}
6178 +
6179 +		raw_spin_unlock(&_global_env.lock);
6180 +		raw_spin_unlock_irqrestore(&state->lock, flags);	
6181 +	}
6182 +	
6183 +	preempt_enable();
6184 +
6185 +	if (err)
6186 +		kfree(tinfo);
6187 +
6188 +	return err;
6189 +}
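/* A minimal sketch of the per-task state mc2_admit_task() relies on:
 * tsk_rt(tsk)->mc2_data must point to a struct mc2_task whose crit and res_id
 * fields name an already-created reservation. How that pointer is populated
 * (e.g., via a set_mc2_task_param()-style wrapper) is assumed and not shown
 * in this excerpt.
 *
 *	struct mc2_task param = {
 *		.crit   = CRIT_LEVEL_C,	// level C => global reservation, tinfo->cpu = -1
 *		.res_id = 42,		// must match an existing reservation id
 *	};
 *	// Hand `param` to the kernel before the task enters real-time mode so
 *	// that mc2_admit_task() finds tsk_rt(tsk)->mc2_data set.
 */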
6190 +
6191 +/* mc2_task_new - A new real-time task has arrived. Release its next job
6192 + *                at the next reservation replenishment time.
6193 + */
6194 +static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
6195 +			  int is_running)
6196 +{
6197 +	unsigned long flags;
6198 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
6199 +	struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
6200 +	struct reservation *res;
6201 +	enum crit_level lv = get_task_crit_level(tsk);
6202 +	lt_t release = 0;
6203 +
6204 +	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
6205 +		   litmus_clock(), on_runqueue, is_running);
6206 +
6207 +	if (tinfo->cpu == -1)
6208 +		state = local_cpu_state();
6209 +	else 
6210 +		state = cpu_state_for(tinfo->cpu);
6211 +	
6212 +	local_irq_save(flags);
6213 +	
6214 +	/* acquire the lock protecting the state and disable interrupts */
6215 +	//raw_spin_lock(&_global_env.lock);
6216 +	//raw_spin_lock(&state->lock);
6217 +	if (is_running) {
6218 +		state->scheduled = tsk;
6219 +		/* make sure this task should actually be running */
6220 +		litmus_reschedule_local();
6221 +	}
6222 +	
6223 +	raw_spin_lock(&state->lock);
6224 +
6225 +	if (lv == CRIT_LEVEL_C) {
6226 +		raw_spin_lock(&_global_env.lock);
6227 +		res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
6228 +	}
6229 +	else {
6230 +		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
6231 +	}
6232 +	//res = res_find_by_id(state, tinfo->mc2_param.res_id);
6233 +	release = res->next_replenishment;
6234 +	
6235 +	if (on_runqueue || is_running) {
6236 +		/* Assumption: litmus_clock() is synchronized across cores
6237 +		 * [see comment in pres_task_resume()] */
6238 +		if (lv == CRIT_LEVEL_C) {
6239 +			gmp_update_time(&_global_env, litmus_clock());
6240 +			//raw_spin_unlock(&_global_env.lock);
6241 +		}
6242 +		else
6243 +			sup_update_time(&state->sup_env, litmus_clock());
6244 +		//mc2_update_time(lv, state, litmus_clock());
6245 +		/* 9/20/2015 fix 
6246 +		mc2_update_ghost_state(state);
6247 +		*/
6248 +		task_arrives(state, tsk);
6249 +		if (lv == CRIT_LEVEL_C)
6250 +			raw_spin_unlock(&_global_env.lock);
6251 +		/* NOTE: drops state->lock */
6252 +		TRACE("mc2_new()\n");
6253 +		
6254 +		mc2_update_timer_and_unlock(state);
6255 +	} else {
6256 +		if (lv == CRIT_LEVEL_C)
6257 +			raw_spin_unlock(&_global_env.lock);
6258 +		raw_spin_unlock(&state->lock);
6259 +		//raw_spin_unlock(&_global_env.lock);
6260 +	}
6261 +	local_irq_restore(flags);
6262 +	
6263 +	if (release) {
6264 +		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
6265 +		//release_at(tsk, release);
6266 +	}
6267 +	else
6268 +		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
6269 +}
6270 +
6271 +/* mc2_reservation_destroy - reservation_destroy system call backend
6272 + */
6273 +static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
6274 +{
6275 +	long ret = -EINVAL;
6276 +	struct mc2_cpu_state *state;
6277 +	struct reservation *res = NULL, *next;
6278 +	struct sup_reservation_environment *sup_env;
6279 +	int found = 0;
6280 +	//enum crit_level lv = get_task_crit_level(current);
6281 +	unsigned long flags;
6282 +	
6283 +	if (cpu == -1) {
6284 +		/* if the reservation is global reservation */
6285 +		local_irq_save(flags);
6286 +		//state = local_cpu_state();
6287 +		raw_spin_lock(&_global_env.lock);
6288 +		//raw_spin_lock(&state->lock);
6289 +		
6290 +		list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
6291 +			if (res->id == reservation_id) {
6292 +				list_del(&res->list);
6293 +				kfree(res);
6294 +				found = 1;
6295 +				ret = 0;
6296 +			}
6297 +		}
6298 +		if (!found) {
6299 +			list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) {
6300 +				if (res->id == reservation_id) {
6301 +					list_del(&res->list);
6302 +					kfree(res);
6303 +					found = 1;
6304 +					ret = 0;
6305 +				}
6306 +			}
6307 +		}
6308 +		if (!found) {
6309 +			list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
6310 +				if (res->id == reservation_id) {
6311 +					list_del(&res->list);
6312 +					kfree(res);
6313 +					found = 1;
6314 +					ret = 0;
6315 +				}
6316 +			}
6317 +		}
6318 +
6319 +		//raw_spin_unlock(&state->lock);
6320 +		raw_spin_unlock(&_global_env.lock);
6321 +		local_irq_restore(flags);
6322 +	} else {
6323 +		/* if the reservation is partitioned reservation */
6324 +		state = cpu_state_for(cpu);
6325 +		local_irq_save(flags);
6326 +		raw_spin_lock(&state->lock);
6327 +		
6328 +	//	res = sup_find_by_id(&state->sup_env, reservation_id);
6329 +		sup_env = &state->sup_env;
6330 +		list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
6331 +			if (res->id == reservation_id) {
6332 +/*
6333 +			if (lv == CRIT_LEVEL_A) {
6334 +					struct table_driven_reservation *tdres;
6335 +					tdres = container_of(res, struct table_driven_reservation, res);
6336 +					kfree(tdres->intervals);
6337 +			}
6338 +*/
6339 +				list_del(&res->list);
6340 +				kfree(res);
6341 +				found = 1;
6342 +				ret = 0;
6343 +			}
6344 +		}
6345 +		if (!found) {
6346 +			list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
6347 +				if (res->id == reservation_id) {
6348 +/*					if (lv == CRIT_LEVEL_A) {
6349 +						struct table_driven_reservation *tdres;
6350 +						tdres = container_of(res, struct table_driven_reservation, res);
6351 +						kfree(tdres->intervals);
6352 +					}
6353 +*/
6354 +					list_del(&res->list);
6355 +					kfree(res);
6356 +					found = 1;
6357 +					ret = 0;
6358 +				}
6359 +			}
6360 +		}
6361 +		if (!found) {
6362 +			list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
6363 +				if (res->id == reservation_id) {
6364 +/*					if (lv == CRIT_LEVEL_A) {
6365 +						struct table_driven_reservation *tdres;
6366 +						tdres = container_of(res, struct table_driven_reservation, res);
6367 +						kfree(tdres->intervals);
6368 +					}
6369 +*/
6370 +					list_del(&res->list);
6371 +					kfree(res);
6372 +					found = 1;
6373 +					ret = 0;
6374 +				}
6375 +			}
6376 +		}
6377 +
6378 +		raw_spin_unlock(&state->lock);
6379 +		local_irq_restore(flags);
6380 +	}
6381 +	
6382 +	TRACE("Rerservation destroyed ret = %d\n", ret);
6383 +	return ret;
6384 +}
6385 +
6386 +/* mc2_task_exit - Task becomes a normal (non-real-time) task
6387 + */
6388 +static void mc2_task_exit(struct task_struct *tsk)
6389 +{
6390 +	unsigned long flags;
6391 +	struct mc2_task_state* tinfo = get_mc2_state(tsk);
6392 +	struct mc2_cpu_state *state;
6393 +	enum crit_level lv = tinfo->mc2_param.crit;
6394 +	struct crit_entry* ce;
6395 +	int cpu;
6396 +
6397 +	local_irq_save(flags);
6398 +	if (tinfo->cpu != -1)
6399 +		state = cpu_state_for(tinfo->cpu);
6400 +	else 
6401 +		state = local_cpu_state();
6402 +	
6403 +	raw_spin_lock(&state->lock);
6404 +	
6405 +	if (state->scheduled == tsk)
6406 +		state->scheduled = NULL;
6407 +
6408 +	ce = &state->crit_entries[lv];
6409 +	if (ce->running == tsk)
6410 +		ce->running = NULL;
6411 +	
6412 +	/* remove from queues */
6413 +	if (is_running(tsk)) {
6414 +		/* Assumption: litmus_clock() is synchronized across cores
6415 +		 * [see comment in pres_task_resume()] */
6416 +		
6417 +		/* update both global and partitioned */
6418 +		if (lv < CRIT_LEVEL_C) {
6419 +			sup_update_time(&state->sup_env, litmus_clock());
6420 +		}
6421 +		else if (lv == CRIT_LEVEL_C) {
6422 +			raw_spin_lock(&_global_env.lock);
6423 +			gmp_update_time(&_global_env, litmus_clock());
6424 +			//raw_spin_unlock(&_global_env.lock);
6425 +		}
6426 +		/* 9/20/2015 fix 
6427 +		mc2_update_ghost_state(state);
6428 +		*/
6429 +		task_departs(tsk, 0);
6430 +		if (lv == CRIT_LEVEL_C)
6431 +			raw_spin_unlock(&_global_env.lock);
6432 +		
6433 +		/* NOTE: drops state->lock */
6434 +		TRACE("mc2_exit()\n");
6435 +
6436 +		mc2_update_timer_and_unlock(state);	
6437 +	} else {
6438 +		raw_spin_unlock(&state->lock);
6439 +		
6440 +	}
6441 +
6442 +	if (lv == CRIT_LEVEL_C) {
6443 +		for_each_online_cpu(cpu) {
6444 +			state = cpu_state_for(cpu);
6445 +			if (state == local_cpu_state())
6446 +				continue;
6447 +			raw_spin_lock(&state->lock);
6448 +			
6449 +			if (state->scheduled == tsk)
6450 +				state->scheduled = NULL;
6451 +			
6452 +			ce = &state->crit_entries[lv];
6453 +			if (ce->running == tsk)
6454 +				ce->running = NULL;
6455 +			
6456 +			raw_spin_unlock(&state->lock);
6457 +		}
6458 +	}
6459 +	
6460 +	local_irq_restore(flags);
6461 +	
6462 +	kfree(tsk_rt(tsk)->plugin_state);
6463 +	tsk_rt(tsk)->plugin_state = NULL;
6464 +	kfree(tsk_rt(tsk)->mc2_data);
6465 +	tsk_rt(tsk)->mc2_data = NULL;
6466 +}
6467 +
6468 +/* create_polling_reservation - create a new polling reservation
6469 + */
6470 +static long create_polling_reservation(
6471 +	int res_type,
6472 +	struct reservation_config *config)
6473 +{
6474 +	struct mc2_cpu_state *state;
6475 +	struct reservation* res;
6476 +	struct polling_reservation *pres;
6477 +	unsigned long flags;
6478 +	int use_edf  = config->priority == LITMUS_NO_PRIORITY;
6479 +	int periodic =  res_type == PERIODIC_POLLING;
6480 +	long err = -EINVAL;
6481 +
6482 +	/* sanity checks */
6483 +	if (config->polling_params.budget >
6484 +	    config->polling_params.period) {
6485 +		printk(KERN_ERR "invalid polling reservation (%u): "
6486 +		       "budget > period\n", config->id);
6487 +		return -EINVAL;
6488 +	}
6489 +	if (config->polling_params.budget >
6490 +	    config->polling_params.relative_deadline
6491 +	    && config->polling_params.relative_deadline) {
6492 +		printk(KERN_ERR "invalid polling reservation (%u): "
6493 +		       "budget > deadline\n", config->id);
6494 +		return -EINVAL;
6495 +	}
6496 +	if (config->polling_params.offset >
6497 +	    config->polling_params.period) {
6498 +		printk(KERN_ERR "invalid polling reservation (%u): "
6499 +		       "offset > period\n", config->id);
6500 +		return -EINVAL;
6501 +	}
6502 +
6503 +	/* Allocate before we grab a spin lock.
6504 +	 * Todo: would be nice to use a core-local allocation.
6505 +	 */
6506 +	pres = kzalloc(sizeof(*pres), GFP_KERNEL);
6507 +	if (!pres)
6508 +		return -ENOMEM;
6509 +
6510 +	if (config->cpu != -1) {
6511 +		
6512 +		//raw_spin_lock_irqsave(&_global_env.lock, flags);
6513 +		state = cpu_state_for(config->cpu);
6514 +		raw_spin_lock_irqsave(&state->lock, flags);
6515 +
6516 +		res = sup_find_by_id(&state->sup_env, config->id);
6517 +		if (!res) {
6518 +			polling_reservation_init(pres, use_edf, periodic,
6519 +				config->polling_params.budget,
6520 +				config->polling_params.period,
6521 +				config->polling_params.relative_deadline,
6522 +				config->polling_params.offset);
6523 +			pres->res.id = config->id;
6524 +			pres->res.blocked_by_ghost = 0;
6525 +			pres->res.is_ghost = NO_CPU;
6526 +			/*if (config->priority == LITMUS_MAX_PRIORITY) {
6527 +				level_a_priorities[config->cpu]++;
6528 +				pres->res.priority = level_a_priorities[config->cpu];
6529 +			}*/
6530 +			if (!use_edf)
6531 +				pres->res.priority = config->priority;
6532 +			sup_add_new_reservation(&state->sup_env, &pres->res);
6533 +			err = config->id;
6534 +			TRACE_CUR("reservation created R%d priority : %llu\n", config->id, pres->res.priority);
6535 +		} else {
6536 +			err = -EEXIST;
6537 +		}
6538 +
6539 +		raw_spin_unlock_irqrestore(&state->lock, flags);
6540 +		//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
6541 +
6542 +	} else {
6543 +		raw_spin_lock_irqsave(&_global_env.lock, flags);
6544 +		
6545 +		res = gmp_find_by_id(&_global_env, config->id);
6546 +		if (!res) {
6547 +			polling_reservation_init(pres, use_edf, periodic,
6548 +				config->polling_params.budget,
6549 +				config->polling_params.period,
6550 +				config->polling_params.relative_deadline,
6551 +				config->polling_params.offset);
6552 +			pres->res.id = config->id;
6553 +			pres->res.blocked_by_ghost = 0;
6554 +			pres->res.scheduled_on = NO_CPU;
6555 +			pres->res.is_ghost = NO_CPU;
6556 +			if (!use_edf)
6557 +				pres->res.priority = config->priority;
6558 +			gmp_add_new_reservation(&_global_env, &pres->res);
6559 +			err = config->id;
6560 +		} else {
6561 +			err = -EEXIST;
6562 +		}
6563 +		raw_spin_unlock_irqrestore(&_global_env.lock, flags);		
6564 +	}
6565 +	
6566 +	if (err < 0)
6567 +		kfree(pres);
6568 +
6569 +	return err;
6570 +}
6571 +
6572 +#define MAX_INTERVALS 1024
6573 +
6574 +/* create_table_driven_reservation - create a table-driven reservation
6575 + */
6576 +static long create_table_driven_reservation(
6577 +	struct reservation_config *config)
6578 +{
6579 +	struct mc2_cpu_state *state;
6580 +	struct reservation* res;
6581 +	struct table_driven_reservation *td_res = NULL;
6582 +	struct lt_interval *slots = NULL;
6583 +	size_t slots_size;
6584 +	unsigned int i, num_slots;
6585 +	unsigned long flags;
6586 +	long err = -EINVAL;
6587 +
6588 +
6589 +	if (!config->table_driven_params.num_intervals) {
6590 +		printk(KERN_ERR "invalid table-driven reservation (%u): "
6591 +		       "no intervals\n", config->id);
6592 +		return -EINVAL;
6593 +	}
6594 +
6595 +	if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
6596 +		printk(KERN_ERR "invalid table-driven reservation (%u): "
6597 +		       "too many intervals (max: %d)\n", config->id, MAX_INTERVALS);
6598 +		return -EINVAL;
6599 +	}
6600 +
6601 +	num_slots = config->table_driven_params.num_intervals;
6602 +	slots_size = sizeof(slots[0]) * num_slots;
6603 +	slots = kzalloc(slots_size, GFP_KERNEL);
6604 +	if (!slots)
6605 +		return -ENOMEM;
6606 +
6607 +	td_res = kzalloc(sizeof(*td_res), GFP_KERNEL);
6608 +	if (!td_res)
6609 +		err = -ENOMEM;
6610 +	else
6611 +		err = copy_from_user(slots,
6612 +			config->table_driven_params.intervals, slots_size);
6613 +
6614 +	if (!err) {
6615 +		/* sanity checks */
6616 +		for (i = 0; !err && i < num_slots; i++)
6617 +			if (slots[i].end <= slots[i].start) {
6618 +				printk(KERN_ERR
6619 +				       "invalid table-driven reservation (%u): "
6620 +				       "invalid interval %u => [%llu, %llu]\n",
6621 +				       config->id, i,
6622 +				       slots[i].start, slots[i].end);
6623 +				err = -EINVAL;
6624 +			}
6625 +
6626 +		for (i = 0; !err && i + 1 < num_slots; i++)
6627 +			if (slots[i + 1].start <= slots[i].end) {
6628 +				printk(KERN_ERR
6629 +				       "invalid table-driven reservation (%u): "
6630 +				       "overlapping intervals %u, %u\n",
6631 +				       config->id, i, i + 1);
6632 +				err = -EINVAL;
6633 +			}
6634 +
6635 +		if (slots[num_slots - 1].end >
6636 +			config->table_driven_params.major_cycle_length) {
6637 +			printk(KERN_ERR
6638 +				"invalid table-driven reservation (%u): last "
6639 +				"interval ends past major cycle %llu > %llu\n",
6640 +				config->id,
6641 +				slots[num_slots - 1].end,
6642 +				config->table_driven_params.major_cycle_length);
6643 +			err = -EINVAL;
6644 +		}
6645 +	}
6646 +
6647 +	if (!err) {
6648 +		state = cpu_state_for(config->cpu);
6649 +		raw_spin_lock_irqsave(&state->lock, flags);
6650 +
6651 +		res = sup_find_by_id(&state->sup_env, config->id);
6652 +		if (!res) {
6653 +			table_driven_reservation_init(td_res,
6654 +				config->table_driven_params.major_cycle_length,
6655 +				slots, num_slots);
6656 +			td_res->res.id = config->id;
6657 +			td_res->res.priority = config->priority;
6658 +			td_res->res.blocked_by_ghost = 0;
6659 +			sup_add_new_reservation(&state->sup_env, &td_res->res);
6660 +			err = config->id;
6661 +		} else {
6662 +			err = -EEXIST;
6663 +		}
6664 +
6665 +		raw_spin_unlock_irqrestore(&state->lock, flags);
6666 +	}
6667 +
6668 +	if (err < 0) {
6669 +		kfree(slots);
6670 +		kfree(td_res);
6671 +	}
6672 +
6673 +	return err;
6674 +}
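/* A small worked example of table_driven_params values that pass the sanity
 * checks above (times in ns; the concrete numbers are illustrative only):
 * each interval must have end > start, consecutive intervals must not touch
 * or overlap, and the last interval must end within the major cycle.
 *
 *	struct lt_interval slots[] = {
 *		{ .start =  0 * NSEC_PER_MSEC, .end = 10 * NSEC_PER_MSEC },
 *		{ .start = 50 * NSEC_PER_MSEC, .end = 60 * NSEC_PER_MSEC },
 *	};
 *	// num_intervals = 2 (<= MAX_INTERVALS) and major_cycle_length = 100 ms
 *	// >= slots[1].end, so every check above is satisfied; intervals points
 *	// to a userspace copy that copy_from_user() pulls into `slots`.
 */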
6675 +
6676 +/* mc2_reservation_create - reservation_create system call backend
6677 + */
6678 +static long mc2_reservation_create(int res_type, void* __user _config)
6679 +{
6680 +	long ret = -EINVAL;
6681 +	struct reservation_config config;
6682 +
6683 +	TRACE("Attempt to create reservation (%d)\n", res_type);
6684 +
6685 +	if (copy_from_user(&config, _config, sizeof(config)))
6686 +		return -EFAULT;
6687 +
6688 +	if (config.cpu != -1) {
6689 +		if (config.cpu < 0 || !cpu_online(config.cpu)) {
6690 +			printk(KERN_ERR "invalid reservation (%u): "
6691 +				   "CPU %d offline\n", config.id, config.cpu);
6692 +			return -EINVAL;
6693 +		}
6694 +	}
6695 +
6696 +	switch (res_type) {
6697 +		case PERIODIC_POLLING:
6698 +		case SPORADIC_POLLING:
6699 +			ret = create_polling_reservation(res_type, &config);
6700 +			break;
6701 +
6702 +		case TABLE_DRIVEN:
6703 +			ret = create_table_driven_reservation(&config);
6704 +			break;
6705 +
6706 +		default:
6707 +			return -EINVAL;
6708 +	};
6709 +
6710 +	return ret;
6711 +}
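/* A minimal sketch of the reservation_config a caller would pass down to this
 * backend for a periodic polling reservation (values in ns, chosen to satisfy
 * the checks in create_polling_reservation(): budget <= period, budget <=
 * relative_deadline, offset <= period). The userspace wrapper that issues the
 * reservation_create system call is assumed and not part of this excerpt.
 *
 *	struct reservation_config cfg = {
 *		.id       = 42,
 *		.cpu      = -1,			// -1 => global (level-C) reservation
 *		.priority = LITMUS_NO_PRIORITY,	// EDF priority (see use_edf)
 *		.polling_params = {
 *			.budget            =  10 * NSEC_PER_MSEC,
 *			.period            = 100 * NSEC_PER_MSEC,
 *			.relative_deadline = 100 * NSEC_PER_MSEC,
 *			.offset            = 0,
 *		},
 *	};
 *	// e.g. reservation_create(PERIODIC_POLLING, &cfg) from userspace
 *	// (wrapper name assumed).
 */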
6712 +
6713 +static struct domain_proc_info mc2_domain_proc_info;
6714 +
6715 +static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
6716 +{
6717 +	*ret = &mc2_domain_proc_info;
6718 +	return 0;
6719 +}
6720 +
6721 +static void mc2_setup_domain_proc(void)
6722 +{
6723 +	int i, cpu;
6724 +	int num_rt_cpus = num_online_cpus();
6725 +
6726 +	struct cd_mapping *cpu_map, *domain_map;
6727 +
6728 +	memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info));
6729 +	init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus);
6730 +	mc2_domain_proc_info.num_cpus = num_rt_cpus;
6731 +	mc2_domain_proc_info.num_domains = num_rt_cpus;
6732 +
6733 +	i = 0;
6734 +	for_each_online_cpu(cpu) {
6735 +		cpu_map = &mc2_domain_proc_info.cpu_to_domains[i];
6736 +		domain_map = &mc2_domain_proc_info.domain_to_cpus[i];
6737 +
6738 +		cpu_map->id = cpu;
6739 +		domain_map->id = i;
6740 +		cpumask_set_cpu(i, cpu_map->mask);
6741 +		cpumask_set_cpu(cpu, domain_map->mask);
6742 +		++i;
6743 +	}
6744 +}
6745 +
6746 +static long mc2_activate_plugin(void)
6747 +{
6748 +	int cpu, lv;
6749 +	struct mc2_cpu_state *state;
6750 +	struct cpu_entry *ce;
6751 +
6752 +	gmp_init(&_global_env);
6753 +	raw_spin_lock_init(&_lowest_prio_cpu.lock);
6754 +	
6755 +	for_each_online_cpu(cpu) {
6756 +		TRACE("Initializing CPU%d...\n", cpu);
6757 +
6758 +		resched_cpu[cpu] = 0;
6759 +		level_a_priorities[cpu] = 0;
6760 +		state = cpu_state_for(cpu);
6761 +		ce = &_lowest_prio_cpu.cpu_entries[cpu];
6762 +		
6763 +		ce->cpu = cpu;
6764 +		ce->scheduled = NULL;
6765 +		ce->deadline = ULLONG_MAX;
6766 +		ce->lv = NUM_CRIT_LEVELS;
6767 +		ce->will_schedule = false;
6768 +
6769 +		raw_spin_lock_init(&state->lock);
6770 +		state->cpu = cpu;
6771 +		state->scheduled = NULL;
6772 +		for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
6773 +			struct crit_entry *cr_entry = &state->crit_entries[lv];
6774 +			cr_entry->level = lv;
6775 +			cr_entry->running = NULL;
6776 +		}
6777 +		sup_init(&state->sup_env);
6778 +
6779 +		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6780 +		state->timer.function = on_scheduling_timer;
6781 +	}
6782 +
6783 +	mc2_setup_domain_proc();
6784 +
6785 +	return 0;
6786 +}
6787 +
6788 +static void mc2_finish_switch(struct task_struct *prev)
6789 +{
6790 +	int cpus;
6791 +	enum crit_level lv = get_task_crit_level(prev);
6792 +	struct mc2_cpu_state *state = local_cpu_state();
6793 +	
6794 +	state->scheduled = is_realtime(current) ? current : NULL;
6795 +	if (lv == CRIT_LEVEL_C) {
6796 +		for (cpus = 0; cpus<NR_CPUS; cpus++) {
6797 +			if (resched_cpu[cpus]) {
6798 +				litmus_reschedule(cpus);
6799 +			}
6800 +		}
6801 +	}
6802 +}
6803 +
6804 +static long mc2_deactivate_plugin(void)
6805 +{
6806 +	int cpu;
6807 +	struct mc2_cpu_state *state;
6808 +	struct reservation *res;
6809 +	struct next_timer_event *event;
6810 +	struct cpu_entry *ce;
6811 +	
6812 +	for_each_online_cpu(cpu) {
6813 +		state = cpu_state_for(cpu);
6814 +		raw_spin_lock(&state->lock);
6815 +
6816 +		hrtimer_cancel(&state->timer);
6817 +
6818 +		ce = &_lowest_prio_cpu.cpu_entries[cpu];
6819 +		
6820 +		ce->cpu = cpu;
6821 +		ce->scheduled = NULL;
6822 +		ce->deadline = ULLONG_MAX;
6823 +		ce->lv = NUM_CRIT_LEVELS;
6824 +		ce->will_schedule = false;
6825 +
6826 +		/* Delete all reservations --- assumes struct reservation
6827 +		 * is prefix of containing struct. */
6828 +
6829 +		while (!list_empty(&state->sup_env.active_reservations)) {
6830 +			res = list_first_entry(
6831 +				&state->sup_env.active_reservations,
6832 +			        struct reservation, list);
6833 +			list_del(&res->list);
6834 +			kfree(res);
6835 +		}
6836 +
6837 +		while (!list_empty(&state->sup_env.inactive_reservations)) {
6838 +			res = list_first_entry(
6839 +				&state->sup_env.inactive_reservations,
6840 +			        struct reservation, list);
6841 +			list_del(&res->list);
6842 +			kfree(res);
6843 +		}
6844 +
6845 +		while (!list_empty(&state->sup_env.depleted_reservations)) {
6846 +			res = list_first_entry(
6847 +				&state->sup_env.depleted_reservations,
6848 +			        struct reservation, list);
6849 +			list_del(&res->list);
6850 +			kfree(res);
6851 +		}
6852 +
6853 +		raw_spin_unlock(&state->lock);
6854 +	}
6855 +
6856 +	raw_spin_lock(&_global_env.lock);
6857 +	
6858 +	while (!list_empty(&_global_env.active_reservations)) {
6859 +		res = list_first_entry(
6860 +			&_global_env.active_reservations,
6861 +				struct reservation, list);
6862 +		list_del(&res->list);
6863 +		kfree(res);
6864 +	}
6865 +
6866 +	while (!list_empty(&_global_env.inactive_reservations)) {
6867 +		res = list_first_entry(
6868 +			&_global_env.inactive_reservations,
6869 +				struct reservation, list);
6870 +		list_del(&res->list);
6871 +		kfree(res);
6872 +	}
6873 +
6874 +	while (!list_empty(&_global_env.depleted_reservations)) {
6875 +		res = list_first_entry(
6876 +			&_global_env.depleted_reservations,
6877 +				struct reservation, list);
6878 +		list_del(&res->list);
6879 +		kfree(res);
6880 +	}
6881 +	
6882 +	while (!list_empty(&_global_env.next_events)) {
6883 +		event = list_first_entry(
6884 +			&_global_env.next_events,
6885 +				struct next_timer_event, list);
6886 +		list_del(&event->list);
6887 +		kfree(event);
6888 +	}
6889 +	
6890 +	raw_spin_unlock(&_global_env.lock);
6891 +	
6892 +	destroy_domain_proc_info(&mc2_domain_proc_info);
6893 +	return 0;
6894 +}
6895 +
6896 +static struct sched_plugin mc2_plugin = {
6897 +	.plugin_name			= "MC2",
6898 +	.schedule				= mc2_schedule,
6899 +	.finish_switch			= mc2_finish_switch,
6900 +	.task_wake_up			= mc2_task_resume,
6901 +	.admit_task				= mc2_admit_task,
6902 +	.task_new				= mc2_task_new,
6903 +	.task_exit				= mc2_task_exit,
6904 +	.complete_job           = mc2_complete_job,
6905 +	.get_domain_proc_info   = mc2_get_domain_proc_info,
6906 +	.activate_plugin		= mc2_activate_plugin,
6907 +	.deactivate_plugin      = mc2_deactivate_plugin,
6908 +	.reservation_create     = mc2_reservation_create,
6909 +	.reservation_destroy	= mc2_reservation_destroy,
6910 +};
6911 +
6912 +static int __init init_mc2(void)
6913 +{
6914 +	return register_sched_plugin(&mc2_plugin);
6915 +}
6916 +
6917 +module_init(init_mc2);
6918 diff --git litmus/sched_psn_edf.c litmus/sched_psn_edf.c
6919 index 2549a3f..216b9f3 100644
6920 --- litmus/sched_psn_edf.c
6921 +++ litmus/sched_psn_edf.c
6922 @@ -23,6 +23,10 @@
6923  #include <litmus/sched_trace.h>
6924  #include <litmus/trace.h>
6925  
6926 +#ifdef CONFIG_PGMRT_SUPPORT
6927 +#include <litmus/pgm.h>
6928 +#endif
6929 +
6930  /* to set up domain/cpu mappings */
6931  #include <litmus/litmus_proc.h>
6932  
6933 @@ -199,6 +203,62 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
6934  	 */
6935  	resched = preempt;
6936  
6937 +#ifdef CONFIG_PGMRT_SUPPORT
6938 +	if (exists) {
6939 +		if (is_pgm_sending(pedf->scheduled)) {
6940 +			if (!is_pgm_satisfied(pedf->scheduled)) {
6941 +				if (!is_priority_boosted(pedf->scheduled)) {
6942 +					TRACE_TASK(pedf->scheduled, "is sending PGM tokens and needs boosting.\n");
6943 +					BUG_ON(is_pgm_satisfied(pedf->scheduled));
6944 +
6945 +					/* We are either sending tokens or waiting for tokens.
6946 +					   If waiting: Boost priority so we'll be scheduled
6947 +						immediately when needed tokens arrive.
6948 +					   If sending: Boost priority so no one (specifically, our
6949 +						consumers) will preempt us while signalling the token
6950 +						transmission.
6951 +					*/
6952 +					tsk_rt(pedf->scheduled)->priority_boosted = 1;
6953 +					tsk_rt(pedf->scheduled)->boost_start_time = litmus_clock();
6954 +
6955 +					if (likely(!blocks)) {
6956 +						requeue(pedf->scheduled, edf);
6957 +						/* we may regain the processor */
6958 +						if (preempt) {
6959 +							preempt = edf_preemption_needed(edf, prev);
6960 +							if (!preempt) {
6961 +								TRACE_TASK(pedf->scheduled, "blocked preemption by lazy boosting.\n");
6962 +							}
6963 +						}
6964 +					}
6965 +				}
6966 +			}
6967 +			else { /* sending is satisfied */
6968 +				tsk_rt(pedf->scheduled)->ctrl_page->pgm_sending = 0;
6969 +				tsk_rt(pedf->scheduled)->ctrl_page->pgm_satisfied = 0;
6970 +
6971 +				if (is_priority_boosted(pedf->scheduled)) {
6972 +					TRACE_TASK(pedf->scheduled,
6973 +							"is done sending PGM tokens must relinquish boosting.\n");
6974 +					/* clear boosting */
6975 +					tsk_rt(pedf->scheduled)->priority_boosted = 0;
6976 +					if(likely(!blocks)) {
6977 +						/* recheck priority */
6978 +						requeue(pedf->scheduled, edf);
6979 +						/* we may lose the processor */
6980 +						if (!preempt) {
6981 +							preempt = edf_preemption_needed(edf, prev);
6982 +							if (preempt) {
6983 +								TRACE_TASK(pedf->scheduled, "preempted by lazy unboosting.\n");
6984 +							}
6985 +						}
6986 +					}
6987 +				}
6988 +			}
6989 +		}
6990 +	}
6991 +#endif
6992 +	
6993  	/* If a task blocks we have no choice but to reschedule.
6994  	 */
6995  	if (blocks)
6996 @@ -243,7 +303,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
6997  	if (next) {
6998  		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
6999  	} else {
7000 -		TRACE("becoming idle at %llu\n", litmus_clock());
7001 +		; //TRACE("becoming idle at %llu\n", litmus_clock());
7002  	}
7003  
7004  	pedf->scheduled = next;
7005 @@ -644,10 +704,14 @@ static long psnedf_admit_task(struct task_struct* tsk)
7006  	    /* don't allow tasks on release master CPU */
7007  	     && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master
7008  #endif
7009 -		)
7010 +		) {
7011 +		TRACE_TASK(tsk, "admitted\n");
7012  		return 0;
7013 -	else
7014 +	}
7015 +	else {
7016 +		TRACE_TASK(tsk, "not admitted\n");
7017  		return -EINVAL;
7018 +	}
7019  }
7020  
7021  /*	Plugin object	*/
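
The sched_plugin table and module_init() hook shown at the top of this excerpt register the MC2 plugin when the patched kernel boots, but the plugin still has to be selected as the active scheduler before tasks can use it. Below is a minimal sketch of doing so from user space, assuming the plugin registers under the name "MC2" (its plugin_name field is set earlier in the plugin source, outside this excerpt) and using the standard LITMUS^RT /proc/litmus/active_plugin switch; liblitmus' setsched tool performs the same write.

/* mc2_switch.c -- minimal sketch: select the MC2 plugin as the active
 * LITMUS^RT scheduler via /proc/litmus/active_plugin.
 * The plugin name "MC2" is an assumption; substitute whatever name the
 * plugin actually registers under on the running system.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/litmus/active_plugin", "w");
	if (!f) {
		perror("fopen /proc/litmus/active_plugin");
		return 1;
	}
	if (fprintf(f, "MC2\n") < 0)
		perror("write plugin name");
	return fclose(f) ? 1 : 0;
}

Note that LITMUS^RT only permits switching the active plugin while no real-time tasks are present, so the switch should be performed before launching any MC2 reservations or tasks.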

Attached Files

  • (2015-06-04 21:22:59, 244.8 KB) [[attachment:MC2-liblitmus-imx6-rtss15.patch]]
  • (2016-05-12 14:35:37, 51.9 KB) [[attachment:MC2-liblitmus-rtss16.patch]]
  • (2016-05-12 14:36:06, 190.4 KB) [[attachment:MC2-litmus-rt-rtss16.patch]]
  • (2015-07-19 10:27:52, 1119.9 KB) [[attachment:MC2-litmut-rt-imx6-rtss15.patch]]
  • (2014-05-27 20:46:19, 58.3 KB) [[attachment:MC2_liblitmus_ipdps15.patch]]
  • (2014-05-27 20:45:43, 1044.3 KB) [[attachment:MC2_litmusrt_ipdps15.patch]]
  • (2017-04-07 21:48:09, 6099.5 KB) [[attachment:buff_sharing.tar]]
  • (2015-01-08 14:20:07, 61.0 KB) [[attachment:feather-trace-patch-against-sched-deadline-v8.patch]]
  • (2014-04-01 23:10:10, 38.9 KB) [[attachment:gedf-mp-rtas14.patch]]
  • (2012-03-02 20:13:59, 1.9 KB) [[attachment:gpu-klmirqd-liblitmus-rt-ecrts12.patch]]
  • (2012-03-02 20:14:25, 389.8 KB) [[attachment:gpu-klmirqd-litmus-rt-ecrts12.patch]]
  • (2012-05-26 21:41:34, 418.0 KB) [[attachment:gpusync-rtss12.patch]]
  • (2012-05-26 21:42:20, 8.6 KB) [[attachment:gpusync_liblitmus-rtss12.patch]]
  • (2013-05-21 15:32:08, 208.6 KB) [[attachment:gpusync_rtss13_liblitmus.patch]]
  • (2013-05-21 15:31:32, 779.5 KB) [[attachment:gpusync_rtss13_litmus.patch]]
  • (2012-05-26 21:42:41, 71.4 KB) [[attachment:klt_tracker_v1.0.litmus.tgz]]
  • (2016-10-13 21:14:05, 19.6 KB) [[attachment:liblitmus-rtas17.patch]]
  • (2017-05-01 20:46:22, 90.0 KB) [[attachment:liblitmus-rtns17.patch]]
  • (2018-12-11 01:38:53, 49.1 KB) [[attachment:liblitmus-semi-part-with-edfos.patch]]
  • (2017-10-09 19:16:09, 304.0 KB) [[attachment:litmus-rt-os-isolation.patch]]
  • (2016-10-13 21:13:27, 207.6 KB) [[attachment:litmus-rt-rtas17.patch]]
  • (2017-05-01 20:46:40, 207.6 KB) [[attachment:litmus-rt-rtns17.patch]]
  • (2018-12-11 01:39:04, 100.5 KB) [[attachment:litmus-rt-semi-part-with-edfos.patch]]
  • (2018-06-26 04:31:48, 7.0 KB) [[attachment:mc2_liblitmus_2015.1-rtns18.patch]]
  • (2018-06-26 04:31:33, 292.7 KB) [[attachment:mc2_litmus-rt_2015.1-rtns18.patch]]
  • (2017-05-01 20:45:10, 2596.9 KB) [[attachment:mcp_study.zip]]
  • (2013-07-13 14:11:53, 58.0 KB) [[attachment:omip-ecrts13.patch]]
  • (2014-02-19 21:48:33, 17.2 KB) [[attachment:pgmrt-liblitmus-ecrts14.patch]]
  • (2014-02-19 21:47:57, 87.8 KB) [[attachment:pgmrt-litmusrt-ecrts14.patch]]
  • (2015-01-08 14:22:32, 61.0 KB) [[attachment:sched-deadline-v8-feather-trace-rtas14.patch]]
  • (2018-06-26 04:32:13, 2545.1 KB) [[attachment:sched_study_rtns2018.tar.gz]]
  • (2017-04-07 21:53:39, 5969.5 KB) [[attachment:seminal.tar]]
  • (2017-04-07 21:51:13, 6064.0 KB) [[attachment:shared_libraries.tar]]
  • (2013-07-13 13:58:25, 42.7 KB) [[attachment:tracing-and-dflp-rtas13.patch]]