Attachment 'litmus-rt-rtns17.patch'
1 diff --git arch/arm/boot/compressed/Makefile arch/arm/boot/compressed/Makefile
2 index 6e1fb2b..e2284fe 100644
3 --- arch/arm/boot/compressed/Makefile
4 +++ arch/arm/boot/compressed/Makefile
5 @@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
6 ORIG_CFLAGS := $(KBUILD_CFLAGS)
7 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
8 endif
9 +KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
10
11 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
12 asflags-y := -DZIMAGE
13 diff --git arch/arm/include/asm/unistd.h arch/arm/include/asm/unistd.h
14 index 0c462a9..e6cb574 100644
15 --- arch/arm/include/asm/unistd.h
16 +++ arch/arm/include/asm/unistd.h
17 @@ -19,7 +19,8 @@
18 * This may need to be greater than __NR_last_syscall+1 in order to
19 * account for the padding in the syscall table
20 */
21 -#define __NR_syscalls (388 + NR_litmus_syscalls)
22 +#define __NR_syscalls (392 + NR_litmus_syscalls)
23 +
24
25 /*
26 * *NOTE*: This is a ghost syscall private to the kernel. Only the
27 diff --git arch/arm/kernel/calls.S arch/arm/kernel/calls.S
28 index f4738a8..1bfbe24 100644
29 --- arch/arm/kernel/calls.S
30 +++ arch/arm/kernel/calls.S
31 @@ -409,6 +409,16 @@
32 CALL(sys_wait_for_ts_release)
33 CALL(sys_release_ts)
34 CALL(sys_null_call)
35 +/* 400 */ CALL(sys_get_current_budget)
36 + CALL(sys_reservation_create)
37 + CALL(sys_reservation_destroy)
38 + CALL(sys_set_mc2_task_param)
39 + CALL(sys_set_page_color)
40 +/* 405 */ CALL(sys_test_call)
41 + CALL(sys_run_test)
42 + CALL(sys_lock_buffer)
43 + CALL(sys_request_mode)
44 + CALL(sys_enact_mode)
45
46 #ifndef syscalls_counted
47 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
48 diff --git arch/arm/mm/cache-l2x0.c arch/arm/mm/cache-l2x0.c
49 index e309c8f..71c969a 100644
50 --- arch/arm/mm/cache-l2x0.c
51 +++ arch/arm/mm/cache-l2x0.c
52 @@ -33,6 +33,8 @@
53 #include "cache-tauros3.h"
54 #include "cache-aurora-l2.h"
55
56 +#include <litmus/cache_proc.h>
57 +
58 struct l2c_init_data {
59 const char *type;
60 unsigned way_size_0;
61 @@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
62
63 if (n) {
64 unsigned i;
65 -
66 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
67 for (i = 0; i < n; i++)
68 pr_cont(" %s", errata[i]);
69 @@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
70 },
71 };
72
73 +void l2c310_flush_all(void)
74 +{
75 + l2c210_flush_all();
76 +}
77 +
78 static int __init __l2c_init(const struct l2c_init_data *data,
79 u32 aux_val, u32 aux_mask, u32 cache_id)
80 {
81 @@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
82 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
83 data->type, cache_id, aux);
84
85 + litmus_setup_lockdown(l2x0_base, cache_id);
86 +
87 return 0;
88 }
89
90 diff --git arch/x86/syscalls/syscall_32.tbl arch/x86/syscalls/syscall_32.tbl
91 index 34680a5..b303a9b 100644
92 --- arch/x86/syscalls/syscall_32.tbl
93 +++ arch/x86/syscalls/syscall_32.tbl
94 @@ -377,3 +377,11 @@
95 368 i386 wait_for_ts_release sys_wait_for_ts_release
96 369 i386 release_ts sys_release_ts
97 370 i386 null_call sys_null_call
98 +371 i386 get_current_budget sys_get_current_budget
99 +372 i386 reservation_create sys_reservation_create
100 +373 i386 reservation_destroy sys_reservation_destroy
101 +374 i386 set_mc2_task_param sys_set_mc2_task_param
102 +375 i386 set_page_color sys_set_page_color
103 +376 i386 test_call sys_test_call
104 +377 i386 run_test sys_run_test
105 +378 i386 lock_buffer sys_lock_buffer
106 diff --git arch/x86/syscalls/syscall_64.tbl arch/x86/syscalls/syscall_64.tbl
107 index cbd1b6b..5f24a80 100644
108 --- arch/x86/syscalls/syscall_64.tbl
109 +++ arch/x86/syscalls/syscall_64.tbl
110 @@ -342,6 +342,14 @@
111 360 common wait_for_ts_release sys_wait_for_ts_release
112 361 common release_ts sys_release_ts
113 362 common null_call sys_null_call
114 +363 common get_current_budget sys_get_current_budget
115 +364 common reservation_create sys_reservation_create
116 +365 common reservation_destroy sys_reservation_destroy
117 +366 common set_mc2_task_param sys_set_mc2_task_param
118 +367 common set_page_color sys_set_page_color
119 +368 common test_call sys_test_call
120 +369 common run_test sys_run_test
121 +370 common lock_buffer sys_lock_buffer
122
123 #
124 # x32-specific system call numbers start at 512 to avoid cache impact
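Until liblitmus grows wrappers, the new syscalls can be exercised directly via syscall(2). A minimal userspace sketch, assuming the x86-64 slot 363 assigned to get_current_budget in the table above (the wrapper name is ours; non-real-time callers simply get back zeroes):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    typedef uint64_t lt_t;                /* LITMUS^RT time type, in ns */
    #define NR_get_current_budget 363     /* x86-64 slot from the table above */

    /* hypothetical wrapper around the raw syscall */
    static long get_current_budget(lt_t *expended, lt_t *remaining)
    {
            return syscall(NR_get_current_budget, expended, remaining);
    }

    int main(void)
    {
            lt_t used = 0, left = 0;
            if (get_current_budget(&used, &left) == 0)
                    printf("used=%llu ns, remaining=%llu ns\n",
                           (unsigned long long)used, (unsigned long long)left);
            return 0;
    }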
125 diff --git include/litmus/budget.h include/litmus/budget.h
126 index bd2d5c9..60eb814 100644
127 --- include/litmus/budget.h
128 +++ include/litmus/budget.h
129 @@ -33,4 +33,6 @@ static inline int requeue_preempted_job(struct task_struct* t)
130 (!budget_exhausted(t) || !budget_enforced(t));
131 }
132
133 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining);
134 +
135 #endif
136 diff --git include/litmus/cache_proc.h include/litmus/cache_proc.h
137 new file mode 100644
138 index 0000000..e9440de
139 --- /dev/null
140 +++ include/litmus/cache_proc.h
141 @@ -0,0 +1,17 @@
142 +#ifndef LITMUS_CACHE_PROC_H
143 +#define LITMUS_CACHE_PROC_H
144 +
145 +#ifdef __KERNEL__
146 +
147 +void litmus_setup_lockdown(void __iomem*, u32);
148 +void enter_irq_mode(void);
149 +void exit_irq_mode(void);
150 +void flush_cache(int all);
151 +void lock_cache(int cpu, u32 val);
152 +
153 +extern struct page *new_alloc_page_color(unsigned long color);
154 +
155 +#endif
156 +
157 +#endif
158 +
159 diff --git include/litmus/litmus.h include/litmus/litmus.h
160 index a6eb534..1037b48 100644
161 --- include/litmus/litmus.h
162 +++ include/litmus/litmus.h
163 @@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
164 ((current)->state == TASK_RUNNING || \
165 preempt_count() & PREEMPT_ACTIVE)
166
167 +#define is_running(t) \
168 + ((t)->state == TASK_RUNNING || \
169 + task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
170 +
171 +#define is_blocked(t) \
172 + (!is_running(t))
173 +
174 #define is_released(t, now) \
175 (lt_before_eq(get_release(t), now))
176 #define is_tardy(t, now) \
177 @@ -130,6 +137,11 @@ void preempt_if_preemptable(struct task_struct* t, int on_cpu);
178
179 #define bheap2task(hn) ((struct task_struct*) hn->value)
180
181 +static inline int is_mode_poll_task(struct task_struct *t)
182 +{
183 + return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->mode_poll_task : 0;
184 +}
185 +
186 #ifdef CONFIG_NP_SECTION
187
188 static inline int is_kernel_np(struct task_struct *t)
189 diff --git include/litmus/mc2_common.h include/litmus/mc2_common.h
190 new file mode 100644
191 index 0000000..4b077ca
192 --- /dev/null
193 +++ include/litmus/mc2_common.h
194 @@ -0,0 +1,35 @@
195 +/*
196 + * MC^2 common data structures
197 + */
198 +
199 +#ifndef __UNC_MC2_COMMON_H__
200 +#define __UNC_MC2_COMMON_H__
201 +
202 +#define NR_MODES 32
203 +
204 +enum crit_level {
205 + CRIT_LEVEL_A = 0,
206 + CRIT_LEVEL_B = 1,
207 + CRIT_LEVEL_C = 2,
208 + NUM_CRIT_LEVELS = 3,
209 + MODE_POLL_TASK = 4,
210 +};
211 +
212 +struct mc2_task {
213 + enum crit_level crit;
214 + unsigned int res_id;
215 + uint32_t mode_mask;
216 + int init_finished;
217 +};
218 +
219 +#ifdef __KERNEL__
220 +
221 +#include <litmus/reservation.h>
222 +
223 +#define tsk_mc2_data(t) (tsk_rt(t)->mc2_data)
224 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
225 + struct reservation *res);
226 +
227 +#endif /* __KERNEL__ */
228 +
229 +#endif
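A sketch of how a task might describe itself with struct mc2_task before calling set_mc2_task_param. The syscall's exact signature is not shown in this excerpt, and the reading of mode_mask (bit i set = task present in mode i, with NR_MODES = 32 modes available) is an assumption:

    struct mc2_task param = {
            .crit          = CRIT_LEVEL_B,   /* criticality level of this task */
            .res_id        = 42,             /* id of a previously created reservation */
            .mode_mask     = 0x3,            /* assumed: task exists in modes 0 and 1 */
            .init_finished = 0,
    };
    /* then, e.g., syscall(__NR_set_mc2_task_param, ..., &param); -- args assumed */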
230 diff --git include/litmus/polling_reservations.h include/litmus/polling_reservations.h
231 new file mode 100644
232 index 0000000..66c9b1e
233 --- /dev/null
234 +++ include/litmus/polling_reservations.h
235 @@ -0,0 +1,36 @@
236 +#ifndef LITMUS_POLLING_RESERVATIONS_H
237 +#define LITMUS_POLLING_RESERVATIONS_H
238 +
239 +#include <litmus/reservation.h>
240 +
241 +struct polling_reservation {
242 + /* extend basic reservation */
243 + struct reservation res;
244 +
245 + lt_t max_budget;
246 + lt_t period;
247 + lt_t deadline;
248 + lt_t offset;
249 +};
250 +
251 +void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
252 + int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
253 +
254 +struct table_driven_reservation {
255 + /* extend basic reservation */
256 + struct reservation res;
257 +
258 + lt_t major_cycle;
259 + unsigned int next_interval;
260 + unsigned int num_intervals;
261 + struct lt_interval *intervals;
262 +
263 + /* info about current scheduling slot */
264 + struct lt_interval cur_interval;
265 + lt_t major_cycle_start;
266 +};
267 +
268 +void table_driven_reservation_init(struct table_driven_reservation *tdres,
269 + lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
270 +
271 +#endif
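Kernel-side, a periodic polling reservation is configured through polling_reservation_init() declared above; a sketch with illustrative parameters:

    static struct polling_reservation pres;

    polling_reservation_init(&pres,
            1,              /* use_edf_prio: order by deadline, not fixed priority */
            1,              /* use_periodic_polling: periodic rather than sporadic */
            2000000ULL,     /* budget: 2 ms, in ns */
            10000000ULL,    /* period: 10 ms */
            10000000ULL,    /* relative deadline == period */
            0ULL);          /* release offset */

    /* The embedded pres.res is then handed to an environment, e.g.
     * sup_add_new_reservation(&sup_env, &pres.res); */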
272 diff --git include/litmus/reservation.h include/litmus/reservation.h
273 new file mode 100644
274 index 0000000..ee3aac2
275 --- /dev/null
276 +++ include/litmus/reservation.h
277 @@ -0,0 +1,263 @@
278 +#ifndef LITMUS_RESERVATION_H
279 +#define LITMUS_RESERVATION_H
280 +
281 +#include <linux/list.h>
282 +#include <linux/hrtimer.h>
283 +
284 +struct reservation_client;
285 +struct reservation_environment;
286 +struct reservation;
287 +
288 +typedef enum {
289 + /* reservation has no clients, is not consuming budget */
290 + RESERVATION_INACTIVE = 0,
291 +
292 + /* reservation has clients, consumes budget when scheduled */
293 + RESERVATION_ACTIVE,
294 +
295 + /* reservation has no clients, but may be consuming budget */
296 + RESERVATION_ACTIVE_IDLE,
297 +
298 + /* Reservation has no budget and waits for
299 + * replenishment. May or may not have clients. */
300 + RESERVATION_DEPLETED,
301 +} reservation_state_t;
302 +
303 +
304 +/* ************************************************************************** */
305 +
306 +/* Select which task to dispatch. If NULL is returned, it means there is nothing
307 + * to schedule right now and background work can be scheduled. */
308 +typedef struct task_struct * (*dispatch_t) (
309 + struct reservation_client *client
310 +);
311 +
312 +/* Something that can be managed in a reservation and that can yield
313 + * a process for dispatching. Contains a pointer to the reservation
314 + * to which it "belongs". */
315 +struct reservation_client {
316 + struct list_head list;
317 + struct reservation* reservation;
318 + dispatch_t dispatch;
319 +};
320 +
321 +
322 +/* ************************************************************************** */
323 +
324 +/* Called by reservations to request state change. */
325 +typedef void (*reservation_change_state_t) (
326 + struct reservation_environment* env,
327 + struct reservation *res,
328 + reservation_state_t new_state
329 +);
330 +
331 +/* The framework within which reservations operate. */
332 +struct reservation_environment {
333 + lt_t time_zero;
334 + lt_t current_time;
335 +
336 + /* services invoked by reservations */
337 + reservation_change_state_t change_state;
338 +};
339 +
340 +
341 +/* ************************************************************************** */
342 +
343 +/* A new client is added or an existing client resumes. */
344 +typedef void (*client_arrives_t) (
345 + struct reservation *reservation,
346 + struct reservation_client *client
347 +);
348 +
349 +/* A client suspends or terminates. */
350 +typedef void (*client_departs_t) (
351 + struct reservation *reservation,
352 + struct reservation_client *client,
353 + int did_signal_job_completion
354 +);
355 +
356 +/* A previously requested replenishment has occurred. */
357 +typedef void (*on_replenishment_timer_t) (
358 + struct reservation *reservation
359 +);
360 +
361 +/* Update the reservation's budget to reflect execution or idling. */
362 +typedef void (*drain_budget_t) (
363 + struct reservation *reservation,
364 + lt_t how_much
365 +);
366 +
367 +/* Select a ready task from one of the clients for scheduling. */
368 +typedef struct task_struct* (*dispatch_client_t) (
369 + struct reservation *reservation,
370 + lt_t *time_slice /* May be used to force rescheduling after
371 + some amount of time. 0 => no limit */
372 +);
373 +
374 +
375 +struct reservation_ops {
376 + dispatch_client_t dispatch_client;
377 +
378 + client_arrives_t client_arrives;
379 + client_departs_t client_departs;
380 +
381 + on_replenishment_timer_t replenish;
382 + drain_budget_t drain_budget;
383 +};
384 +
385 +struct reservation {
386 + /* used to queue in environment */
387 + struct list_head list;
388 +
389 + reservation_state_t state;
390 + unsigned int id;
391 +
392 + /* exact meaning defined by impl. */
393 + lt_t priority;
394 + lt_t cur_budget;
395 + lt_t next_replenishment;
396 +
397 + /* budget stats */
398 + lt_t budget_consumed; /* budget consumed in the current allocation cycle */
399 + lt_t budget_consumed_total;
400 +
401 + /* interaction with framework */
402 + struct reservation_environment *env;
403 + struct reservation_ops *ops;
404 +
405 + struct list_head clients;
406 +
407 + /* for global env. */
408 + int scheduled_on;
409 + int event_added;
410 + /* blocked by a ghost job; do not charge budget when ACTIVE */
411 + int blocked_by_ghost;
412 + /* set if this is a ghost job; if clear, do not charge budget when ACTIVE_IDLE */
413 + int is_ghost;
414 +
415 + /* for mode change */
416 + int reported;
417 + int mode;
418 + //under MC2, assume only one task per reservation
419 + //but possibly multiple reservations per task
420 + struct task_struct *tsk;
421 +};
422 +
423 +void reservation_init(struct reservation *res);
424 +
425 +/* Default implementations */
426 +
427 +/* simply select the first client in the list, set *for_at_most to zero */
428 +struct task_struct* default_dispatch_client(
429 + struct reservation *res,
430 + lt_t *for_at_most
431 +);
432 +
433 +/* "connector" reservation client to hook up tasks with reservations */
434 +struct task_client {
435 + struct reservation_client client;
436 + struct task_struct *task;
437 +};
438 +
439 +void task_client_init(struct task_client *tc, struct task_struct *task,
440 + struct reservation *reservation);
441 +
442 +#define SUP_RESCHEDULE_NOW (0)
443 +#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
444 +
445 +/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
446 + * environment.
447 + */
448 +struct sup_reservation_environment {
449 + struct reservation_environment env;
450 +
451 + /* ordered by priority */
452 + struct list_head active_reservations;
453 +
454 + /* ordered by next_replenishment */
455 + struct list_head depleted_reservations;
456 +
457 + /* unordered */
458 + struct list_head inactive_reservations;
459 +
460 + /* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
461 + * - SUP_NO_SCHEDULER_UPDATE means nothing to do
462 + * any other value means program a timer for the given time
463 + */
464 + lt_t next_scheduler_update;
465 + /* set to true if a call to sup_dispatch() is imminent */
466 + bool will_schedule;
467 +};
468 +
469 +/* Contract:
470 + * - before calling into sup_ code, or any reservation methods,
471 + * update the time with sup_update_time(); and
472 + * - after calling into sup_ code, or any reservation methods,
473 + * check next_scheduler_update and program timer or trigger
474 + * scheduler invocation accordingly.
475 + */
476 +
477 +void sup_init(struct sup_reservation_environment* sup_env);
478 +void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
479 + struct reservation* new_res);
480 +void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
481 + lt_t timeout);
482 +void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
483 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
484 +
485 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
486 + unsigned int id);
487 +
488 +/* A global multiprocessor reservation environment. */
489 +
490 +typedef enum {
491 + EVENT_REPLENISH = 0,
492 + EVENT_DRAIN,
493 + EVENT_OTHERS,
494 +} event_type_t;
495 +
496 +
497 +struct next_timer_event {
498 + lt_t next_update;
499 + int timer_armed_on;
500 + unsigned int id;
501 + event_type_t type;
502 + struct list_head list;
503 +};
504 +
505 +struct gmp_reservation_environment {
506 + //raw_spinlock_t lock;
507 + struct reservation_environment env;
508 +
509 + /* ordered by priority */
510 + struct list_head active_reservations;
511 +
512 + /* ordered by next_replenishment */
513 + struct list_head depleted_reservations;
514 +
515 + /* unordered */
516 + struct list_head inactive_reservations;
517 +
518 + /* timer event ordered by next_update */
519 + struct list_head next_events;
520 +
521 + /* (schedule_now == true) means call gmp_dispatch() now */
522 + int schedule_now;
523 + /* set to true if a call to gmp_dispatch() is imminent */
524 + bool will_schedule;
525 +};
526 +
527 +void gmp_init(struct gmp_reservation_environment* gmp_env);
528 +void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
529 + struct reservation* new_res);
530 +void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
531 + lt_t timeout, unsigned int id, event_type_t type);
532 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
533 +int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
534 +struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
535 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
536 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
537 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
538 + unsigned int id);
539 +
540 +#endif
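The contract documented above boils down to the following call pattern in a plugin's scheduling path; a sketch (how the one-shot timer is programmed is plugin-specific):

    static struct task_struct*
    pick_next(struct sup_reservation_environment *sup_env)
    {
            struct task_struct *next;
            lt_t update;

            sup_update_time(sup_env, litmus_clock()); /* before: advance time */
            next = sup_dispatch(sup_env);

            update = sup_env->next_scheduler_update;  /* after: honor the update */
            if (update != SUP_RESCHEDULE_NOW &&
                update != SUP_NO_SCHEDULER_UPDATE) {
                    /* program a one-shot timer to fire at 'update' */
            }
            return next;
    }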
541 diff --git include/litmus/rt_param.h include/litmus/rt_param.h
542 index 7b9a909..2ec2d0c 100644
543 --- include/litmus/rt_param.h
544 +++ include/litmus/rt_param.h
545 @@ -51,6 +51,16 @@ typedef enum {
546 TASK_EARLY
547 } release_policy_t;
548
549 +#ifdef CONFIG_PGMRT_SUPPORT
550 +typedef enum {
551 + PGM_NOT_A_NODE,
552 + PGM_SRC,
553 + PGM_SINK,
554 + PGM_SRC_SINK,
555 + PGM_INTERNAL
556 +} pgm_node_type_t;
557 +#endif
558 +
559 /* We use the common priority interpretation "lower index == higher priority",
560 * which is commonly used in fixed-priority schedulability analysis papers.
561 * So, a numerically lower priority value implies higher scheduling priority,
562 @@ -62,6 +72,7 @@ typedef enum {
563 #define LITMUS_MAX_PRIORITY 512
564 #define LITMUS_HIGHEST_PRIORITY 1
565 #define LITMUS_LOWEST_PRIORITY (LITMUS_MAX_PRIORITY - 1)
566 +#define LITMUS_NO_PRIORITY UINT_MAX
567
568 /* Provide generic comparison macros for userspace,
569 * in case that we change this later. */
570 @@ -71,6 +82,49 @@ typedef enum {
571 ((p) >= LITMUS_HIGHEST_PRIORITY && \
572 (p) <= LITMUS_LOWEST_PRIORITY)
573
574 +/* reservation support */
575 +
576 +typedef enum {
577 + PERIODIC_POLLING,
578 + SPORADIC_POLLING,
579 + TABLE_DRIVEN,
580 +} reservation_type_t;
581 +
582 +struct lt_interval {
583 + lt_t start;
584 + lt_t end;
585 +};
586 +
587 +#ifndef __KERNEL__
588 +#define __user
589 +#endif
590 +
591 +struct reservation_config {
592 + unsigned int id;
593 + lt_t priority;
594 + int cpu;
595 +
596 + union {
597 + struct {
598 + lt_t period;
599 + lt_t budget;
600 + lt_t relative_deadline;
601 + lt_t offset;
602 + } polling_params;
603 +
604 + struct {
605 + lt_t major_cycle_length;
606 + unsigned int num_intervals;
607 + struct lt_interval __user *intervals;
608 + } table_driven_params;
609 + };
610 +
611 + //indicates which MC2 mode this reservation belongs to
612 + int mode;
613 +};
614 +
615 +/* regular sporadic task support */
616 +
617 struct rt_task {
618 lt_t exec_cost;
619 lt_t period;
620 @@ -81,6 +135,10 @@ struct rt_task {
621 task_class_t cls;
622 budget_policy_t budget_policy; /* ignored by pfair */
623 release_policy_t release_policy;
624 +#ifdef CONFIG_PGMRT_SUPPORT
625 + pgm_node_type_t pgm_type;
626 + lt_t pgm_expected_etoe;
627 +#endif
628 };
629
630 union np_flag {
631 @@ -120,6 +178,14 @@ struct control_page {
632 uint64_t ts_syscall_start; /* Feather-Trace cycles */
633 uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
634 * started. */
635 + volatile uint64_t mode_poll_task; /* Set to 1 if the task invokes enact_mode syscall */
636 +
637 +#ifdef CONFIG_PGMRT_SUPPORT
638 + /* Flags from userspace signifying PGM wait states. */
639 + volatile uint32_t pgm_waiting; /* waiting for tokens */
640 + volatile uint32_t pgm_sending; /* sending tokens */
641 + volatile uint32_t pgm_satisfied; /* done waiting/sending */
642 +#endif
643
644 /* to be extended */
645 };
646 @@ -130,6 +196,7 @@ struct control_page {
647 #define LITMUS_CP_OFFSET_IRQ_COUNT 8
648 #define LITMUS_CP_OFFSET_TS_SC_START 16
649 #define LITMUS_CP_OFFSET_IRQ_SC_START 24
650 +#define LITMUS_CP_OFFSET_MODE_POLL_TASK 32
651
652 /* don't export internal data structures to user space (liblitmus) */
653 #ifdef __KERNEL__
654 @@ -165,6 +232,7 @@ struct rt_job {
655 };
656
657 struct pfair_param;
658 +struct mc2_task;
659
660 /* RT task parameters for scheduling extensions
661 * These parameters are inherited during clone and therefore must
662 @@ -246,7 +314,10 @@ struct rt_param {
663 volatile int linked_on;
664
665 /* PFAIR/PD^2 state. Allocated on demand. */
666 - struct pfair_param* pfair;
667 + union {
668 + void *plugin_state;
669 + struct pfair_param *pfair;
670 + };
671
672 /* Fields saved before BE->RT transition.
673 */
674 @@ -275,6 +346,10 @@ struct rt_param {
675
676 /* Pointer to the page shared between userspace and kernel. */
677 struct control_page * ctrl_page;
678 +
679 + /* Mixed-criticality specific data */
680 + struct mc2_task* mc2_data;
681 + unsigned long addr_ctrl_page;
682 };
683
684 #endif
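The LITMUS_CP_OFFSET_* constants must mirror the field layout of struct control_page; mode_poll_task lands at byte offset 32 because the four preceding 64-bit fields occupy bytes 0-31. An illustrative compile-time check (not part of this patch) that would catch drift:

    BUILD_BUG_ON(offsetof(struct control_page, mode_poll_task)
                 != LITMUS_CP_OFFSET_MODE_POLL_TASK);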
685 diff --git include/litmus/sched_plugin.h include/litmus/sched_plugin.h
686 index 0ccccd6..4c8aaa6 100644
687 --- include/litmus/sched_plugin.h
688 +++ include/litmus/sched_plugin.h
689 @@ -77,6 +77,17 @@ typedef long (*wait_for_release_at_t)(lt_t release_time);
690 /* Informs the plugin when a synchronous release takes place. */
691 typedef void (*synchronous_release_at_t)(lt_t time_zero);
692
693 +/* How much budget has the current task consumed so far, and how much
694 + * has it left? The default implementation ties into the per-task
695 + * budget enforcement code. Plugins can override this to report
696 + * reservation-specific values. */
697 +typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
698 +
699 +/* Reservation creation/removal backends. The meaning of reservation_type
700 + * and reservation_id is entirely plugin-specific. */
701 +typedef long (*reservation_create_t)(int reservation_type, void* __user config);
702 +typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
703 +
704 /************************ misc routines ***********************/
705
706
707 @@ -109,6 +120,12 @@ struct sched_plugin {
708 task_exit_t task_exit;
709 task_cleanup_t task_cleanup;
710
711 + current_budget_t current_budget;
712 +
713 + /* Reservation support */
714 + reservation_create_t reservation_create;
715 + reservation_destroy_t reservation_destroy;
716 +
717 #ifdef CONFIG_LITMUS_LOCKING
718 /* locking protocols */
719 allocate_lock_t allocate_lock;
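A plugin opts into the new hooks by filling in the corresponding struct sched_plugin fields; a sketch (the mc2_* handler names are placeholders, not necessarily what litmus/sched_mc2.c in this patch uses):

    static struct sched_plugin mc2_plugin = {
            .plugin_name         = "MC2",
            /* ... the usual scheduling callbacks ... */
            .current_budget      = litmus_current_budget,   /* default from budget.h */
            .reservation_create  = mc2_reservation_create,  /* placeholder */
            .reservation_destroy = mc2_reservation_destroy, /* placeholder */
    };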
720 diff --git include/litmus/sched_trace.h include/litmus/sched_trace.h
721 index 82bde82..80d9523 100644
722 --- include/litmus/sched_trace.h
723 +++ include/litmus/sched_trace.h
724 @@ -50,13 +50,12 @@ struct st_switch_away_data { /* A process was switched away from on a given CPU.
725 u64 exec_time;
726 };
727
728 -struct st_completion_data { /* A job completed. */
729 - u64 when;
730 - u8 forced:1; /* Set to 1 if job overran and kernel advanced to the
731 - * next task automatically; set to 0 otherwise.
732 - */
733 - u8 __uflags:7;
734 - u8 __unused[7];
735 +struct st_completion_data { /* A job completed. */
736 + u64 when;
737 + u8 forced:1; /* Set to 1 if job overran and kernel advanced to the
738 + * next job automatically; set to 0 otherwise.
739 + */
740 + u64 exec_time:63; /* Actual execution time of job. */
741 };
742
743 struct st_block_data { /* A task blocks. */
744 @@ -80,6 +79,21 @@ struct st_sys_release_data {
745 u64 release;
746 };
747
748 +struct st_enact_mode_data {
749 + u64 when;
750 + u8 __unused[8];
751 +};
752 +
753 +struct st_request_mode_data {
754 + u64 when;
755 + u8 __unused[8];
756 +};
757 +
758 +struct st_sys_start_data {
759 + u64 when;
760 + u64 start;
761 +};
762 +
763 #define DATA(x) struct st_ ## x ## _data x;
764
765 typedef enum {
766 @@ -87,14 +101,16 @@ typedef enum {
767 * uninitialized records. */
768 ST_PARAM,
769 ST_RELEASE,
770 - ST_ASSIGNED,
771 ST_SWITCH_TO,
772 ST_SWITCH_AWAY,
773 ST_COMPLETION,
774 ST_BLOCK,
775 ST_RESUME,
776 ST_ACTION,
777 - ST_SYS_RELEASE
778 + ST_SYS_RELEASE,
779 + ST_ENACT_MODE,
780 + ST_REQUEST_MODE,
781 + ST_SYS_START,
782 } st_event_record_type_t;
783
784 struct st_event_record {
785 @@ -105,7 +121,6 @@ struct st_event_record {
786 DATA(name);
787 DATA(param);
788 DATA(release);
789 - DATA(assigned);
790 DATA(switch_to);
791 DATA(switch_away);
792 DATA(completion);
793 @@ -113,6 +128,9 @@ struct st_event_record {
794 DATA(resume);
795 DATA(action);
796 DATA(sys_release);
797 + DATA(enact_mode);
798 + DATA(request_mode);
799 + DATA(sys_start);
800 } data;
801 };
802
803 @@ -155,6 +173,14 @@ feather_callback void do_sched_trace_action(unsigned long id,
804 feather_callback void do_sched_trace_sys_release(unsigned long id,
805 lt_t* start);
806
807 +feather_callback void do_sched_trace_enact_mode(unsigned long id,
808 + struct task_struct* task);
809 +
810 +feather_callback void do_sched_trace_request_mode(unsigned long id,
811 + struct task_struct* task);
812 +feather_callback void do_sched_trace_sys_start(unsigned long id,
813 + lt_t* start);
814 +
815 #endif
816
817 #else
818 @@ -179,6 +205,9 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
819 #define trace_litmus_task_block(t)
820 #define trace_litmus_task_resume(t)
821 #define trace_litmus_sys_release(start)
822 +#define trace_litmus_enact_mode(t)
823 +#define trace_litmus_request_mode(t)
824 +#define trace_litmus_sys_start(start)
825
826 #endif
827
828 @@ -254,6 +283,28 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
829
830 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
831
832 +#define sched_trace_enact_mode(t) \
833 + do { \
834 + SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, \
835 + do_sched_trace_enact_mode, t); \
836 + trace_litmus_enact_mode(t); \
837 + } while (0)
838 +
839 +#define sched_trace_request_mode(t) \
840 + do { \
841 + SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, \
842 + do_sched_trace_request_mode, t); \
843 + trace_litmus_request_mode(t); \
844 + } while (0)
845 +
846 +/* when is a pointer, it does not need an explicit cast to unsigned long */
847 +#define sched_trace_sys_start(when) \
848 + do { \
849 + SCHED_TRACE(SCHED_TRACE_BASE_ID + 13, \
850 + do_sched_trace_sys_start, when); \
851 + trace_litmus_sys_start(when); \
852 + } while (0)
853 +
854 #endif /* __KERNEL__ */
855
856 #endif
857 diff --git include/litmus/trace.h include/litmus/trace.h
858 index 6017872..4dbb39ea 100644
859 --- include/litmus/trace.h
860 +++ include/litmus/trace.h
861 @@ -3,7 +3,6 @@
862
863 #ifdef CONFIG_SCHED_OVERHEAD_TRACE
864
865 -
866 #include <litmus/feather_trace.h>
867 #include <litmus/feather_buffer.h>
868
869 @@ -118,6 +117,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
870 #define TS_TICK_START(t) CPU_TTIMESTAMP(110, t)
871 #define TS_TICK_END(t) CPU_TTIMESTAMP(111, t)
872
873 +#define TS_RELEASE_C_START CPU_DTIMESTAMP(108, TSK_RT)
874 +#define TS_RELEASE_C_END CPU_DTIMESTAMP(109, TSK_RT)
875 +
876 #define TS_QUANTUM_BOUNDARY_START CPU_TIMESTAMP_CUR(112)
877 #define TS_QUANTUM_BOUNDARY_END CPU_TIMESTAMP_CUR(113)
878
879 @@ -137,6 +139,20 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
880 #define TS_SEND_RESCHED_START(c) MSG_TIMESTAMP_SENT(190, c)
881 #define TS_SEND_RESCHED_END MSG_TIMESTAMP_RECEIVED(191)
882
883 -#define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when))
884 +#define TS_ISR_START CPU_TIMESTAMP_CUR(192)
885 +#define TS_ISR_END CPU_TIMESTAMP_CUR(193)
886 +
887 +#define TS_MODE_CHANGE_START CPU_TIMESTAMP(194)
888 +#define TS_MODE_CHANGE_END CPU_TIMESTAMP(195)
889 +
890 +#define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when))
891 +#define TS_RELEASE_LATENCY_A(when) CPU_LTIMESTAMP(209, &(when))
892 +#define TS_RELEASE_LATENCY_B(when) CPU_LTIMESTAMP(210, &(when))
893 +#define TS_RELEASE_LATENCY_C(when) CPU_LTIMESTAMP(211, &(when))
894 +
895 +#define TS_SCHED_A_START CPU_DTIMESTAMP(212, TSK_UNKNOWN)
896 +#define TS_SCHED_A_END(t) CPU_TTIMESTAMP(213, t)
897 +#define TS_SCHED_C_START CPU_DTIMESTAMP(214, TSK_UNKNOWN)
898 +#define TS_SCHED_C_END(t) CPU_TTIMESTAMP(215, t)
899
900 #endif /* !_SYS_TRACE_H_ */
901 diff --git include/litmus/unistd_32.h include/litmus/unistd_32.h
902 index 94264c2..86bbbb8d 100644
903 --- include/litmus/unistd_32.h
904 +++ include/litmus/unistd_32.h
905 @@ -17,5 +17,13 @@
906 #define __NR_wait_for_ts_release __LSC(9)
907 #define __NR_release_ts __LSC(10)
908 #define __NR_null_call __LSC(11)
909 +#define __NR_get_current_budget __LSC(12)
910 +#define __NR_reservation_create __LSC(13)
911 +#define __NR_reservation_destroy __LSC(14)
912 +#define __NR_set_mc2_task_param __LSC(15)
913 +#define __NR_set_page_color __LSC(16)
914 +#define __NR_test_call __LSC(17)
915 +#define __NR_run_test __LSC(18)
916 +#define __NR_lock_buffer __LSC(19)
917
918 -#define NR_litmus_syscalls 12
919 +#define NR_litmus_syscalls 20
920 diff --git include/litmus/unistd_64.h include/litmus/unistd_64.h
921 index d5ced0d..4b96e7c 100644
922 --- include/litmus/unistd_64.h
923 +++ include/litmus/unistd_64.h
924 @@ -29,5 +29,22 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
925 __SYSCALL(__NR_release_ts, sys_release_ts)
926 #define __NR_null_call __LSC(11)
927 __SYSCALL(__NR_null_call, sys_null_call)
928 +#define __NR_get_current_budget __LSC(12)
929 +__SYSCALL(__NR_get_current_budget, sys_get_current_budget)
930 +#define __NR_reservation_create __LSC(13)
931 +__SYSCALL(__NR_reservation_create, sys_reservation_create)
932 +#define __NR_reservation_destroy __LSC(14)
933 +__SYSCALL(__NR_reservation_destroy, sys_reservation_destroy)
934 +#define __NR_set_mc2_task_param __LSC(15)
935 +__SYSCALL(__NR_set_mc2_task_param, sys_set_mc2_task_param)
936 +#define __NR_set_page_color __LSC(16)
937 +__SYSCALL(__NR_set_page_color, sys_set_page_color)
938 +#define __NR_test_call __LSC(17)
939 +__SYSCALL(__NR_test_call, sys_test_call)
940 +#define __NR_run_test __LSC(18)
941 +__SYSCALL(__NR_run_test, sys_run_test)
942 +#define __NR_lock_buffer __LSC(19)
943 +__SYSCALL(__NR_lock_buffer, sys_lock_buffer)
944
945 -#define NR_litmus_syscalls 12
946 +
947 +#define NR_litmus_syscalls 20
948 diff --git include/trace/events/litmus.h include/trace/events/litmus.h
949 index 0fffcee..ef8d60f2 100644
950 --- include/trace/events/litmus.h
951 +++ include/trace/events/litmus.h
952 @@ -225,6 +225,76 @@ TRACE_EVENT(litmus_sys_release,
953 TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when)
954 );
955
956 +/*
957 + * Trace enact mode
958 + */
959 +TRACE_EVENT(litmus_enact_mode,
960 +
961 + TP_PROTO(struct task_struct *t),
962 +
963 + TP_ARGS(t),
964 +
965 + TP_STRUCT__entry(
966 + __field( pid_t, pid )
967 + __field( unsigned int, job )
968 + __field( lt_t, when )
969 + ),
970 +
971 + TP_fast_assign(
972 + __entry->pid = t ? t->pid : 0;
973 + __entry->job = t ? t->rt_param.job_params.job_no : 0;
974 + __entry->when = litmus_clock();
975 + ),
976 +
977 + TP_printk("Mode Enact at %Lu\n", __entry->when)
978 +);
979 +
980 +/*
981 + * Trace notice mode request
982 + */
983 +TRACE_EVENT(litmus_request_mode,
984 +
985 + TP_PROTO(struct task_struct *t),
986 +
987 + TP_ARGS(t),
988 +
989 + TP_STRUCT__entry(
990 + __field( pid_t, pid )
991 + __field( unsigned int, job )
992 + __field( lt_t, when )
993 + ),
994 +
995 + TP_fast_assign(
996 + __entry->pid = t ? t->pid : 0;
997 + __entry->job = t ? t->rt_param.job_params.job_no : 0;
998 + __entry->when = litmus_clock();
999 + ),
1000 +
1001 + TP_printk("Mode request at %Lu\n", __entry->when)
1002 +);
1003 +
1004 +/*
1005 + * Trace synchronous start
1006 + */
1007 +TRACE_EVENT(litmus_sys_start,
1008 +
1009 + TP_PROTO(lt_t *start),
1010 +
1011 + TP_ARGS(start),
1012 +
1013 + TP_STRUCT__entry(
1014 + __field( lt_t, rel )
1015 + __field( lt_t, when )
1016 + ),
1017 +
1018 + TP_fast_assign(
1019 + __entry->rel = *start;
1020 + __entry->when = litmus_clock();
1021 + ),
1022 +
1023 + TP_printk("SynStart(%Lu) at %Lu\n", __entry->rel, __entry->when)
1024 +);
1025 +
1026 #endif /* _SCHED_TASK_TRACEPOINT_H */
1027
1028 /* Must stay outside the protection */
1029 diff --git kernel/sched/litmus.c kernel/sched/litmus.c
1030 index 9d58690..60be718 100644
1031 --- kernel/sched/litmus.c
1032 +++ kernel/sched/litmus.c
1033 @@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
1034 /* task counter */
1035 p->se.sum_exec_runtime += delta;
1036 if (delta) {
1037 - TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1038 - delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1039 + //TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
1040 + // delta, p->rt_param.job_params.exec_time, budget_remaining(p));
1041 + ;
1042 }
1043 /* sched_clock() */
1044 p->se.exec_start = rq->clock;
1045 diff --git litmus/Kconfig litmus/Kconfig
1046 index babb43d..d47548d 100644
1047 --- litmus/Kconfig
1048 +++ litmus/Kconfig
1049 @@ -243,7 +243,7 @@ config SCHED_TASK_TRACE
1050 config SCHED_TASK_TRACE_SHIFT
1051 int "Buffer size for sched_trace_xxx() events"
1052 depends on SCHED_TASK_TRACE
1053 - range 8 13
1054 + range 8 22
1055 default 9
1056 help
1057
1058 diff --git litmus/Makefile litmus/Makefile
1059 index 7970cd5..e274409 100644
1060 --- litmus/Makefile
1061 +++ litmus/Makefile
1062 @@ -11,6 +11,7 @@ obj-y = sched_plugin.o litmus.o \
1063 sync.o \
1064 rt_domain.o \
1065 edf_common.o \
1066 + mc2_common.o \
1067 fp_common.o \
1068 fdso.o \
1069 locking.o \
1070 @@ -19,13 +20,18 @@ obj-y = sched_plugin.o litmus.o \
1071 binheap.o \
1072 ctrldev.o \
1073 uncachedev.o \
1074 + reservation.o \
1075 + polling_reservations.o \
1076 sched_gsn_edf.o \
1077 sched_psn_edf.o \
1078 - sched_pfp.o
1079 + sched_pfp.o \
1080 + sched_mc2.o \
1081 + bank_proc.o \
1082 + color_shm.o \
1083 + cache_proc.o
1084
1085 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
1086 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
1087 -
1088 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
1089 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
1090 obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
1091 diff --git litmus/bank_proc.c litmus/bank_proc.c
1092 new file mode 100644
1093 index 0000000..097cff1
1094 --- /dev/null
1095 +++ litmus/bank_proc.c
1096 @@ -0,0 +1,793 @@
1097 +/*
1098 + * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
1099 + * This file keeps a pool of colored pages. Users can request pages with a
1100 + * specific color or bank number.
1101 + * Part of the code is adapted from Jonathan Herman's code.
1102 + */
1103 +#include <linux/init.h>
1104 +#include <linux/types.h>
1105 +#include <linux/kernel.h>
1106 +#include <linux/module.h>
1107 +#include <linux/sysctl.h>
1108 +#include <linux/slab.h>
1109 +#include <linux/io.h>
1110 +#include <linux/mutex.h>
1111 +#include <linux/mm.h>
1112 +#include <linux/random.h>
1113 +
1114 +#include <litmus/litmus_proc.h>
1115 +#include <litmus/sched_trace.h>
1116 +#include <litmus/litmus.h>
1117 +
1118 +#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
1119 +
1120 +// This Address Decoding is used in imx6-sabredsd platform
1121 +#define BANK_MASK 0x38000000
1122 +#define BANK_SHIFT 27
1123 +#define CACHE_MASK 0x0000f000
1124 +#define CACHE_SHIFT 12
1125 +
1126 +#define PAGES_PER_COLOR 2000
1127 +#define NUM_BANKS 8
1128 +#define NUM_COLORS 16
1129 +
1130 +unsigned int NUM_PAGE_LIST; //8*16
1131 +
1132 +unsigned int number_banks;
1133 +unsigned int number_cachecolors;
1134 +
1135 +unsigned int set_partition_max = 0x0000ffff;
1136 +unsigned int set_partition_min = 0;
1137 +unsigned int bank_partition_max = 0x000000ff;
1138 +unsigned int bank_partition_min = 0;
1139 +
1140 +int show_page_pool = 0;
1141 +int refill_page_pool = 0;
1142 +spinlock_t reclaim_lock;
1143 +
1144 +unsigned int set_partition[9] = {
1145 + 0x00000003, /* Core 0, and Level A*/
1146 + 0x00000003, /* Core 0, and Level B*/
1147 + 0x0000000C, /* Core 1, and Level A*/
1148 + 0x0000000C, /* Core 1, and Level B*/
1149 + 0x00000030, /* Core 2, and Level A*/
1150 + 0x00000030, /* Core 2, and Level B*/
1151 + 0x000000C0, /* Core 3, and Level A*/
1152 + 0x000000C0, /* Core 3, and Level B*/
1153 + 0x0000ff00, /* Level C */
1154 +};
1155 +
1156 +unsigned int bank_partition[9] = {
1157 + 0x00000010, /* Core 0, and Level A*/
1158 + 0x00000010, /* Core 0, and Level B*/
1159 + 0x00000020, /* Core 1, and Level A*/
1160 + 0x00000020, /* Core 1, and Level B*/
1161 + 0x00000040, /* Core 2, and Level A*/
1162 + 0x00000040, /* Core 2, and Level B*/
1163 + 0x00000080, /* Core 3, and Level A*/
1164 + 0x00000080, /* Core 3, and Level B*/
1165 + 0x0000000c, /* Level C */
1166 +};
1167 +
1168 +unsigned int set_index[9] = {
1169 + 0, 0, 0, 0, 0, 0, 0, 0, 0
1170 +};
1171 +
1172 +unsigned int bank_index[9] = {
1173 + 0, 0, 0, 0, 0, 0, 0, 0, 0
1174 +};
1175 +
1176 +int node_index[9] = {
1177 + -1, -1, -1, -1, -1, -1, -1, -1, -1
1178 +};
1179 +
1180 +struct mutex void_lockdown_proc;
1181 +
1182 +/*
1183 + * Every page list should contain a lock, a list, and a number recording how many pages it store
1184 + */
1185 +struct color_group {
1186 + spinlock_t lock;
1187 + char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
1188 + struct list_head list;
1189 + atomic_t nr_pages;
1190 +};
1191 +
1192 +
1193 +static struct color_group *color_groups;
1194 +
1195 +/*
1196 + * Naive function to count the number of 1's
1197 + */
1198 +unsigned int counting_one_set(unsigned int v)
1199 +{
1200 + unsigned int c; // c accumulates the total bits set in v
1201 +
1202 + for (c = 0; v; v >>= 1)
1203 + {
1204 + c += v & 1;
1205 + }
1206 + return c;
1207 +}
1208 +
1209 +unsigned int two_exp(unsigned int e)
1210 +{
1211 + unsigned int v = 1;
1212 + for (; e>0; e-- )
1213 + {
1214 + v=v*2;
1215 + }
1216 + return v;
1217 +}
1218 +
1219 +unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
1220 +{
1221 + unsigned int pos = 0;
1222 +
1223 + while(true)
1224 + {
1225 + if(index ==0 && (bitmask & 1)==1)
1226 + {
1227 + break;
1228 + }
1229 + if(index !=0 && (bitmask & 1)==1){
1230 + index--;
1231 + }
1232 + pos++;
1233 + bitmask = bitmask >>1;
1234 +
1235 + }
1236 + return pos;
1237 +}
1238 +
1239 +/* helper functions to find the next colored pool index */
1240 +static inline unsigned int first_index(unsigned long node)
1241 +{
1242 + unsigned int bank_no = 0, color_no = 0;
1243 +
1244 + while(bank_no < NUM_BANKS) {
1245 + if ((bank_partition[node]>>bank_no) & 0x1)
1246 + break;
1247 + bank_no++;
1248 + }
1249 + while(color_no < NUM_COLORS) {
1250 + if ((set_partition[node]>>color_no) & 0x1)
1251 + break;
1252 + color_no++;
1253 + }
1254 + return NUM_COLORS*bank_no + color_no;
1255 +}
1256 +
1257 +static inline unsigned int last_index(unsigned long node)
1258 +{
1259 + int bank_no = 7, color_no = 15; /* signed so the >= 0 loops below terminate */
1260 +
1261 + while(bank_no >= 0) {
1262 + if ((bank_partition[node]>>bank_no) & 0x1)
1263 + break;
1264 + bank_no--;
1265 + }
1266 + while(color_no >= 0) {
1267 + if ((set_partition[node]>>color_no) & 0x1)
1268 + break;
1269 + color_no--;
1270 + }
1271 + return NUM_COLORS*bank_no + color_no;
1272 +}
1273 +
1274 +static inline unsigned int next_color(unsigned long node, unsigned int current_color)
1275 +{
1276 + int try = 0, ret = 0;
1277 + current_color++;
1278 + if (current_color == NUM_COLORS) {
1279 + current_color = 0;
1280 + ret = 1;
1281 + }
1282 +
1283 + while (try < NUM_COLORS) {
1284 + if ((set_partition[node]>>current_color)&0x1)
1285 + break;
1286 + current_color++;
1287 + if (current_color == NUM_COLORS) {
1288 + current_color = 0;
1289 + ret = 1;
1290 + }
1291 + try++;
1292 + }
1293 + if (!ret)
1294 + return current_color;
1295 + else
1296 + return current_color + NUM_COLORS;
1297 +}
1298 +
1299 +static inline unsigned int next_bank(unsigned long node, unsigned int current_bank)
1300 +{
1301 + int try = 0;
1302 + current_bank++;
1303 + if (current_bank == NUM_BANKS) {
1304 + current_bank = 0;
1305 + }
1306 +
1307 + while (try < NUM_BANKS) {
1308 + if ((bank_partition[node]>>current_bank)&0x1)
1309 + break;
1310 + current_bank++;
1311 + if (current_bank == NUM_BANKS) {
1312 + current_bank = 0;
1313 + }
1314 + try++;
1315 + }
1316 + return current_bank;
1317 +}
1318 +
1319 +static inline unsigned int get_next_index(unsigned long node, unsigned int current_index)
1320 +{
1321 + unsigned int bank_no, color_no, color_ret, bank_ret;
1322 + bank_no = current_index>>4; // 2^4 = 16 colors
1323 + color_no = current_index - bank_no*NUM_COLORS;
1324 + bank_ret = bank_no;
1325 + color_ret = next_color(node, color_no);
1326 + if (color_ret >= NUM_COLORS) {
1327 + // next bank
1328 + color_ret -= NUM_COLORS;
1329 + bank_ret = next_bank(node, bank_no);
1330 + }
1331 +
1332 + return bank_ret * NUM_COLORS + color_ret;
1333 +}
1334 +
1335 +/* Decoding page color, 0~15 */
1336 +static inline unsigned int page_color(struct page *page)
1337 +{
1338 + return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
1339 +}
1340 +
1341 +/* Decoding page bank number, 0~7 */
1342 +static inline unsigned int page_bank(struct page *page)
1343 +{
1344 + return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
1345 +}
1346 +
1347 +static inline unsigned int page_list_index(struct page *page)
1348 +{
1349 + unsigned int idx;
1350 + idx = (page_color(page) + page_bank(page)*(number_cachecolors));
1351 +
1352 + return idx;
1353 +}
1354 +
1355 +
1356 +
1357 +/*
1358 + * Determine the smallest number of pages held by any page list.
1359 + */
1360 +static unsigned long smallest_nr_pages(void)
1361 +{
1362 + unsigned long i, min_pages;
1363 + struct color_group *cgroup;
1364 + cgroup = &color_groups[16*2];
1365 + min_pages =atomic_read(&cgroup->nr_pages);
1366 + for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
1367 + cgroup = &color_groups[i];
1368 + if (atomic_read(&cgroup->nr_pages) < min_pages)
1369 + min_pages = atomic_read(&cgroup->nr_pages);
1370 + }
1371 + return min_pages;
1372 +}
1373 +
1374 +static void show_nr_pages(void)
1375 +{
1376 + unsigned long i;
1377 + struct color_group *cgroup;
1378 + printk("show nr pages***************************************\n");
1379 + for (i = 0; i < NUM_PAGE_LIST; ++i) {
1380 + cgroup = &color_groups[i];
1381 + printk("(%03ld) = %03d, ", i, atomic_read(&cgroup->nr_pages));
1382 + if((i % 8) ==7) {
1383 + printk("\n");
1384 + }
1385 + }
1386 +}
1387 +
1388 +/*
1389 + * Add a page to current pool.
1390 + */
1391 +void add_page_to_color_list(struct page *page)
1392 +{
1393 + const unsigned long color = page_list_index(page);
1394 + struct color_group *cgroup = &color_groups[color];
1395 + BUG_ON(in_list(&page->lru) || PageLRU(page));
1396 + BUG_ON(page_count(page) > 1);
1397 + spin_lock(&cgroup->lock);
1398 + list_add_tail(&page->lru, &cgroup->list);
1399 + atomic_inc(&cgroup->nr_pages);
1400 + SetPageLRU(page);
1401 + spin_unlock(&cgroup->lock);
1402 +}
1403 +
1404 +/*
1405 + * Replenish the page pool.
1406 + * If a newly allocated page is one we want, it is pushed onto the correct
1407 + * page list; otherwise, it is freed.
1408 + * A user may need to invoke this function repeatedly until the page pool has enough pages.
1409 + */
1410 +static int do_add_pages(void)
1411 +{
1412 + struct page *page, *page_tmp;
1413 + LIST_HEAD(free_later);
1414 + unsigned long color;
1415 + int ret = 0;
1416 + int i = 0;
1417 + int free_counter = 0;
1418 + unsigned long counter[128]= {0};
1419 +
1420 + // allocate a batch of pages; pages of unneeded colors are freed below
1421 + for (i=0; i< 1024*20;i++) {
1422 + page = alloc_page(GFP_HIGHUSER_MOVABLE);
1423 +
1424 + if (unlikely(!page)) {
1425 + printk(KERN_WARNING "Could not allocate pages.\n");
1426 + ret = -ENOMEM;
1427 + goto out;
1428 + }
1429 + color = page_list_index(page);
1430 + counter[color]++;
1431 + if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
1432 + add_page_to_color_list(page);
1433 + } else {
1434 + // Pages here will be freed later
1435 + list_add_tail(&page->lru, &free_later);
1436 + free_counter++;
1437 + }
1438 + }
1439 +
1440 + // Free the unwanted pages
1441 + list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
1442 + list_del(&page->lru);
1443 + __free_page(page);
1444 + }
1445 +out:
1446 + return ret;
1447 +}
1448 +
1449 +/*
1450 + * Provide pages for replacement according to cache color.
1451 + * This is the only implementation here;
1452 + * it should not be called by others directly.
1453 + *
1454 + */
1455 +struct page *new_alloc_page_color(unsigned long color)
1456 +{
1457 +// printk("allocate new page color = %d\n", color);
1458 + struct color_group *cgroup;
1459 + struct page *rPage = NULL;
1460 +
1461 + if (color > (number_cachecolors * number_banks - 1)) { /* color is unsigned */
1462 + TRACE_CUR("Wrong color %lu\n", color);
1463 + goto out;
1464 + }
1465 +
1466 +
1467 + cgroup = &color_groups[color];
1468 + spin_lock(&cgroup->lock);
1469 + if (unlikely(!atomic_read(&cgroup->nr_pages))) {
1470 + TRACE_CUR("No free %lu colored pages.\n", color);
1471 + goto out_unlock;
1472 + }
1473 + rPage = list_first_entry(&cgroup->list, struct page, lru);
1474 + BUG_ON(page_count(rPage) > 1);
1475 + //get_page(rPage);
1476 + list_del(&rPage->lru);
1477 + atomic_dec(&cgroup->nr_pages);
1478 + ClearPageLRU(rPage);
1479 +out_unlock:
1480 + spin_unlock(&cgroup->lock);
1481 +out:
1482 + return rPage;
1483 +}
1484 +
1485 +struct page* get_colored_page(unsigned long color)
1486 +{
1487 + return new_alloc_page_color(color);
1488 +}
1489 +
1490 +/*
1491 + * Provide pages for replacement according to the requesting node:
1492 + * node = 0 for Level A tasks on CPU 0
1493 + * node = 1 for Level B tasks on CPU 0
1494 + * node = 2 for Level A tasks on CPU 1
1495 + * node = 3 for Level B tasks on CPU 1
1496 + * node = 4 for Level A tasks on CPU 2
1497 + * node = 5 for Level B tasks on CPU 2
1498 + * node = 6 for Level A tasks on CPU 3
1499 + * node = 7 for Level B tasks on CPU 3
1500 + * node = 8 for Level C tasks
1501 + */
1502 +struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
1503 +{
1504 + struct page *rPage = NULL;
1505 + int try = 0;
1506 + unsigned int idx;
1507 +
1508 + if (node_index[node] == -1)
1509 + idx = first_index(node);
1510 + else
1511 + idx = node_index[node];
1512 +
1513 + BUG_ON(idx<0 || idx>127);
1514 + rPage = new_alloc_page_color(idx);
1515 + if (node_index[node] == last_index(node))
1516 + node_index[node] = first_index(node);
1517 + else
1518 + node_index[node]++;
1519 +
1520 + while (!rPage) {
1521 + try++;
1522 + if (try>=256)
1523 + break;
1524 + idx = get_next_index(node, idx);
1525 + printk(KERN_ALERT "try = %d: out of pages! requested node = %ld, idx = %d\n", try, node, idx);
1526 + BUG_ON(idx<0 || idx>127);
1527 + rPage = new_alloc_page_color(idx);
1528 + }
1529 + node_index[node] = idx;
1530 + return rPage;
1531 +}
1532 +
1533 +
1534 +/*
1535 + * Reclaim pages.
1536 + */
1537 +void reclaim_page(struct page *page)
1538 +{
1539 + const unsigned long color = page_list_index(page);
1540 + spin_lock(&reclaim_lock);
1541 + put_page(page);
1542 + add_page_to_color_list(page);
1543 +
1544 + spin_unlock(&reclaim_lock);
1545 + printk("Reclaimed page (list %ld): color %x, bank %x, nr_pages=%d\n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
1546 +}
1547 +
1548 +
1549 +/*
1550 + * Initialize the numbers of banks and cache colors
1551 + */
1552 +static void __init init_variables(void)
1553 +{
1554 + number_banks = counting_one_set(BANK_MASK);
1555 + number_banks = two_exp(number_banks);
1556 +
1557 + number_cachecolors = counting_one_set(CACHE_MASK);
1558 + number_cachecolors = two_exp(number_cachecolors);
1559 + NUM_PAGE_LIST = number_banks * number_cachecolors;
1560 + printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
1561 + mutex_init(&void_lockdown_proc);
1562 + spin_lock_init(&reclaim_lock);
1563 +
1564 +}
1565 +
1566 +
1567 +/*
1568 + * Initialize the page pool
1569 + */
1570 +static int __init init_color_groups(void)
1571 +{
1572 + struct color_group *cgroup;
1573 + unsigned long i;
1574 + int err = 0;
1575 +
1576 + printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
1577 + color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
1578 +
1579 + if (!color_groups) {
1580 + printk(KERN_WARNING "Could not allocate color groups.\n");
1581 + err = -ENOMEM;
1582 + } else {
1583 +
1584 + for (i = 0; i < NUM_PAGE_LIST; ++i) {
1585 + cgroup = &color_groups[i];
1586 + atomic_set(&cgroup->nr_pages, 0);
1587 + INIT_LIST_HEAD(&cgroup->list);
1588 + spin_lock_init(&cgroup->lock);
1589 + }
1590 + }
1591 + return err;
1592 +}
1593 +
1594 +int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1595 + size_t *lenp, loff_t *ppos)
1596 +{
1597 + int ret = 0, i = 0;
1598 + mutex_lock(&void_lockdown_proc);
1599 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1600 + if (ret)
1601 + goto out;
1602 + if (write) {
1603 + printk("New set Partition : \n");
1604 + for(i =0;i <9;i++)
1605 + {
1606 + set_index[i] = 0;
1607 + printk("set[%d] = %x \n", i, set_partition[i]);
1608 + }
1609 + }
1610 +out:
1611 + mutex_unlock(&void_lockdown_proc);
1612 + return ret;
1613 +}
1614 +
1615 +int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
1616 + size_t *lenp, loff_t *ppos)
1617 +{
1618 + int ret = 0, i = 0;
1619 + mutex_lock(&void_lockdown_proc);
1620 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1621 + if (ret)
1622 + goto out;
1623 + if (write) {
1624 + for(i =0;i <9;i++)
1625 + {
1626 + bank_index[i] = 0;
1627 + }
1628 + }
1629 +out:
1630 + mutex_unlock(&void_lockdown_proc);
1631 + return ret;
1632 +}
1633 +
1634 +int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1635 + size_t *lenp, loff_t *ppos)
1636 +{
1637 + int ret = 0;
1638 + mutex_lock(&void_lockdown_proc);
1639 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1640 + if (ret)
1641 + goto out;
1642 + if (write) {
1643 + show_nr_pages();
1644 + }
1645 +out:
1646 + mutex_unlock(&void_lockdown_proc);
1647 + return ret;
1648 +}
1649 +
1650 +int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
1651 + size_t *lenp, loff_t *ppos)
1652 +{
1653 + int ret = 0;
1654 + mutex_lock(&void_lockdown_proc);
1655 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1656 + if (ret)
1657 + goto out;
1658 + if (write) {
1659 + do_add_pages();
1660 + show_nr_pages();
1661 + }
1662 +out:
1663 + mutex_unlock(&void_lockdown_proc);
1664 + return ret;
1665 +}
1666 +
1667 +static struct ctl_table cache_table[] =
1668 +{
1669 +
1670 + {
1671 + .procname = "C0_LA_set",
1672 + .mode = 0666,
1673 + .proc_handler = set_partition_handler,
1674 + .data = &set_partition[0],
1675 + .maxlen = sizeof(set_partition[0]),
1676 + .extra1 = &set_partition_min,
1677 + .extra2 = &set_partition_max,
1678 + },
1679 + {
1680 + .procname = "C0_LB_set",
1681 + .mode = 0666,
1682 + .proc_handler = set_partition_handler,
1683 + .data = &set_partition[1],
1684 + .maxlen = sizeof(set_partition[1]),
1685 + .extra1 = &set_partition_min,
1686 + .extra2 = &set_partition_max,
1687 + },
1688 + {
1689 + .procname = "C1_LA_set",
1690 + .mode = 0666,
1691 + .proc_handler = set_partition_handler,
1692 + .data = &set_partition[2],
1693 + .maxlen = sizeof(set_partition[2]),
1694 + .extra1 = &set_partition_min,
1695 + .extra2 = &set_partition_max,
1696 + },
1697 + {
1698 + .procname = "C1_LB_set",
1699 + .mode = 0666,
1700 + .proc_handler = set_partition_handler,
1701 + .data = &set_partition[3],
1702 + .maxlen = sizeof(set_partition[3]),
1703 + .extra1 = &set_partition_min,
1704 + .extra2 = &set_partition_max,
1705 + },
1706 + {
1707 + .procname = "C2_LA_set",
1708 + .mode = 0666,
1709 + .proc_handler = set_partition_handler,
1710 + .data = &set_partition[4],
1711 + .maxlen = sizeof(set_partition[4]),
1712 + .extra1 = &set_partition_min,
1713 + .extra2 = &set_partition_max,
1714 + },
1715 + {
1716 + .procname = "C2_LB_set",
1717 + .mode = 0666,
1718 + .proc_handler = set_partition_handler,
1719 + .data = &set_partition[5],
1720 + .maxlen = sizeof(set_partition[5]),
1721 + .extra1 = &set_partition_min,
1722 + .extra2 = &set_partition_max,
1723 + },
1724 + {
1725 + .procname = "C3_LA_set",
1726 + .mode = 0666,
1727 + .proc_handler = set_partition_handler,
1728 + .data = &set_partition[6],
1729 + .maxlen = sizeof(set_partition[6]),
1730 + .extra1 = &set_partition_min,
1731 + .extra2 = &set_partition_max,
1732 + },
1733 + {
1734 + .procname = "C3_LB_set",
1735 + .mode = 0666,
1736 + .proc_handler = set_partition_handler,
1737 + .data = &set_partition[7],
1738 + .maxlen = sizeof(set_partition[7]),
1739 + .extra1 = &set_partition_min,
1740 + .extra2 = &set_partition_max,
1741 + },
1742 + {
1743 + .procname = "Call_LC_set",
1744 + .mode = 0666,
1745 + .proc_handler = set_partition_handler,
1746 + .data = &set_partition[8],
1747 + .maxlen = sizeof(set_partition[8]),
1748 + .extra1 = &set_partition_min,
1749 + .extra2 = &set_partition_max,
1750 + },
1751 + {
1752 + .procname = "C0_LA_bank",
1753 + .mode = 0666,
1754 + .proc_handler = bank_partition_handler,
1755 + .data = &bank_partition[0],
1756 + .maxlen = sizeof(set_partition[0]),
1757 + .extra1 = &bank_partition_min,
1758 + .extra2 = &bank_partition_max,
1759 + },
1760 + {
1761 + .procname = "C0_LB_bank",
1762 + .mode = 0666,
1763 + .proc_handler = bank_partition_handler,
1764 + .data = &bank_partition[1],
1765 + .maxlen = sizeof(set_partition[1]),
1766 + .extra1 = &bank_partition_min,
1767 + .extra2 = &bank_partition_max,
1768 + },
1769 + {
1770 + .procname = "C1_LA_bank",
1771 + .mode = 0666,
1772 + .proc_handler = bank_partition_handler,
1773 + .data = &bank_partition[2],
1774 + .maxlen = sizeof(set_partition[2]),
1775 + .extra1 = &bank_partition_min,
1776 + .extra2 = &bank_partition_max,
1777 + },
1778 + {
1779 + .procname = "C1_LB_bank",
1780 + .mode = 0666,
1781 + .proc_handler = bank_partition_handler,
1782 + .data = &bank_partition[3],
1783 + .maxlen = sizeof(set_partition[3]),
1784 + .extra1 = &bank_partition_min,
1785 + .extra2 = &bank_partition_max,
1786 + },
1787 + {
1788 + .procname = "C2_LA_bank",
1789 + .mode = 0666,
1790 + .proc_handler = bank_partition_handler,
1791 + .data = &bank_partition[4],
1792 + .maxlen = sizeof(set_partition[4]),
1793 + .extra1 = &bank_partition_min,
1794 + .extra2 = &bank_partition_max,
1795 + },
1796 + {
1797 + .procname = "C2_LB_bank",
1798 + .mode = 0666,
1799 + .proc_handler = bank_partition_handler,
1800 + .data = &bank_partition[5],
1801 + .maxlen = sizeof(set_partition[5]),
1802 + .extra1 = &bank_partition_min,
1803 + .extra2 = &bank_partition_max,
1804 + },
1805 + {
1806 + .procname = "C3_LA_bank",
1807 + .mode = 0666,
1808 + .proc_handler = bank_partition_handler,
1809 + .data = &bank_partition[6],
1810 + .maxlen = sizeof(set_partition[6]),
1811 + .extra1 = &bank_partition_min,
1812 + .extra2 = &bank_partition_max,
1813 + },
1814 + {
1815 + .procname = "C3_LB_bank",
1816 + .mode = 0666,
1817 + .proc_handler = bank_partition_handler,
1818 + .data = &bank_partition[7],
1819 + .maxlen = sizeof(set_partition[7]),
1820 + .extra1 = &bank_partition_min,
1821 + .extra2 = &bank_partition_max,
1822 + },
1823 + {
1824 + .procname = "Call_LC_bank",
1825 + .mode = 0666,
1826 + .proc_handler = bank_partition_handler,
1827 + .data = &bank_partition[8],
1828 + .maxlen = sizeof(set_partition[8]),
1829 + .extra1 = &bank_partition_min,
1830 + .extra2 = &bank_partition_max,
1831 + },
1832 + {
1833 + .procname = "show_page_pool",
1834 + .mode = 0666,
1835 + .proc_handler = show_page_pool_handler,
1836 + .data = &show_page_pool,
1837 + .maxlen = sizeof(show_page_pool),
1838 + }, {
1839 + .procname = "refill_page_pool",
1840 + .mode = 0666,
1841 + .proc_handler = refill_page_pool_handler,
1842 + .data = &refill_page_pool,
1843 + .maxlen = sizeof(refill_page_pool),
1844 + },
1845 + { }
1846 +};
1847 +
1848 +static struct ctl_table litmus_dir_table[] = {
1849 + {
1850 + .procname = "litmus",
1851 + .mode = 0555,
1852 + .child = cache_table,
1853 + },
1854 + { }
1855 +};
1856 +
1857 +
1858 +static struct ctl_table_header *litmus_sysctls;
1859 +
1860 +
1861 +/*
1862 + * Initialize this proc interface
1863 + */
1864 +static int __init litmus_color_init(void)
1865 +{
1866 +	int err = 0;
1867 +	printk(KERN_INFO "Init bankproc.c\n");
1868 +
1869 + init_variables();
1870 +
1871 + printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
1872 +
1873 + litmus_sysctls = register_sysctl_table(litmus_dir_table);
1874 + if (!litmus_sysctls) {
1875 + printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
1876 + err = -EFAULT;
1877 + goto out;
1878 + }
1879 +
1880 + init_color_groups();
1881 + do_add_pages();
1882 +
1883 + printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
1884 +out:
1885 + return err;
1886 +}
1887 +
1888 +module_init(litmus_color_init);
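+
+/*
+ * Usage sketch (hypothetical values; assumes the "litmus" sysctl directory
+ * above appears as /proc/sys/litmus). Each CX_LY_set / CX_LY_bank entry is
+ * written as a plain integer bitmask, bounded by the corresponding
+ * *_min/*_max limits:
+ *
+ *   echo 3  > /proc/sys/litmus/C0_LA_set     # colors 0-1 for CPU0, level A
+ *   echo 16 > /proc/sys/litmus/Call_LC_bank  # bank 4 for the level-C pool
+ */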
1889 +
1890 diff --git litmus/budget.c litmus/budget.c
1891 index 47bf78a..d67f4b3 100644
1892 --- litmus/budget.c
1893 +++ litmus/budget.c
1894 @@ -1,9 +1,11 @@
1895 #include <linux/sched.h>
1896 #include <linux/percpu.h>
1897 #include <linux/hrtimer.h>
1898 +#include <linux/uaccess.h>
1899
1900 #include <litmus/litmus.h>
1901 #include <litmus/preempt.h>
1902 +#include <litmus/sched_plugin.h>
1903
1904 #include <litmus/budget.h>
1905
1906 @@ -113,4 +115,54 @@ static int __init init_budget_enforcement(void)
1907 return 0;
1908 }
1909
1910 +void litmus_current_budget(lt_t *used_so_far, lt_t *remaining)
1911 +{
1912 + struct task_struct *t = current;
1913 + unsigned long flags;
1914 + s64 delta;
1915 +
1916 + local_irq_save(flags);
1917 +
1918 + delta = sched_clock_cpu(smp_processor_id()) - t->se.exec_start;
1919 + if (delta < 0)
1920 + delta = 0;
1921 +
1922 + TRACE_CUR("current_budget: sc:%llu start:%llu lt_t:%llu delta:%lld exec-time:%llu rem:%llu\n",
1923 + sched_clock_cpu(smp_processor_id()), t->se.exec_start,
1924 + litmus_clock(), delta,
1925 + tsk_rt(t)->job_params.exec_time,
1926 + budget_remaining(t));
1927 +
1928 + if (used_so_far)
1929 + *used_so_far = tsk_rt(t)->job_params.exec_time + delta;
1930 +
1931 + if (remaining) {
1932 + *remaining = budget_remaining(t);
1933 + if (*remaining > delta)
1934 + *remaining -= delta;
1935 + else
1936 + *remaining = 0;
1937 + }
1938 +
1939 + local_irq_restore(flags);
1940 +}
1941 +
1942 +asmlinkage long sys_get_current_budget(
1943 + lt_t __user * _expended,
1944 + lt_t __user *_remaining)
1945 +{
1946 + lt_t expended = 0, remaining = 0;
1947 +
1948 + if (is_realtime(current))
1949 + litmus->current_budget(&expended, &remaining);
1950 +
1951 + if (_expended && put_user(expended, _expended))
1952 + return -EFAULT;
1953 +
1954 + if (_remaining && put_user(remaining, _remaining))
1955 + return -EFAULT;
1956 +
1957 + return 0;
1958 +}
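+
+/*
+ * Userspace sketch (hypothetical; the __NR_get_current_budget constant is
+ * assumed to match this kernel's syscall table):
+ *
+ *   lt_t used, rem;   // lt_t is the LITMUS^RT time type
+ *   syscall(__NR_get_current_budget, &used, &rem);
+ *
+ * Either pointer may be NULL if the caller does not need that value; the
+ * call only reports non-zero budgets for real-time tasks.
+ */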
1959 +
1960 module_init(init_budget_enforcement);
1961 diff --git litmus/cache_proc.c litmus/cache_proc.c
1962 new file mode 100644
1963 index 0000000..5cdf953
1964 --- /dev/null
1965 +++ litmus/cache_proc.c
1966 @@ -0,0 +1,1414 @@
1967 +#include <asm/uaccess.h>
1968 +#include <linux/uaccess.h>
1969 +#include <linux/init.h>
1970 +#include <linux/types.h>
1971 +#include <linux/kernel.h>
1972 +#include <linux/module.h>
1973 +#include <linux/sysctl.h>
1974 +#include <linux/slab.h>
1975 +#include <linux/io.h>
1976 +#include <linux/mutex.h>
1977 +#include <linux/time.h>
1978 +#include <linux/random.h>
1979 +
1980 +#include <litmus/litmus_proc.h>
1981 +#include <litmus/sched_trace.h>
1982 +#include <litmus/cache_proc.h>
1983 +#include <litmus/mc2_common.h>
1984 +#include <litmus/litmus.h>
1985 +
1986 +#include <asm/hardware/cache-l2x0.h>
1987 +#include <asm/cacheflush.h>
1988 +
1989 +
1990 +#define UNLOCK_ALL 0x00000000 /* allocation in any way */
1991 +#define LOCK_ALL (~UNLOCK_ALL)
1992 +#define MAX_NR_WAYS 16
1993 +#define MAX_NR_COLORS 16
1994 +#define CACHELINE_SIZE 32
1995 +#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
1996 +#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
1997 +
1998 +typedef struct cacheline
1999 +{
2000 + int line[INTS_IN_CACHELINE];
2001 +} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
2002 +
2003 +void mem_lock(u32 lock_val, int cpu);
2004 +
2005 +/*
2006 + * unlocked_way[i] : lockdown value in which only way i is unlocked
2007 + *
2008 + * a 0 bit = allocation can occur in the corresponding way
2009 + * a 1 bit = allocation cannot occur in the corresponding way
2010 + */
2011 +u32 unlocked_way[MAX_NR_WAYS] = {
2012 + 0xFFFFFFFE, /* way 0 unlocked */
2013 + 0xFFFFFFFD,
2014 + 0xFFFFFFFB,
2015 + 0xFFFFFFF7,
2016 + 0xFFFFFFEF, /* way 4 unlocked */
2017 + 0xFFFFFFDF,
2018 + 0xFFFFFFBF,
2019 + 0xFFFFFF7F,
2020 + 0xFFFFFEFF, /* way 8 unlocked */
2021 + 0xFFFFFDFF,
2022 + 0xFFFFFBFF,
2023 + 0xFFFFF7FF,
2024 + 0xFFFFEFFF, /* way 12 unlocked */
2025 + 0xFFFFDFFF,
2026 + 0xFFFFBFFF,
2027 + 0xFFFF7FFF,
2028 +};
2029 +
2030 +u32 nr_unlocked_way[MAX_NR_WAYS+1] = {
2031 +	0x0000FFFF, /* all ways are locked. usable = 0 */
2032 +	0x0000FFFE, /* ways 0..0 unlocked. usable = 1 */
2033 + 0x0000FFFC,
2034 + 0x0000FFF8,
2035 + 0x0000FFF0,
2036 + 0x0000FFE0,
2037 + 0x0000FFC0,
2038 + 0x0000FF80,
2039 + 0x0000FF00,
2040 + 0x0000FE00,
2041 + 0x0000FC00,
2042 + 0x0000F800,
2043 + 0x0000F000,
2044 + 0x0000E000,
2045 + 0x0000C000,
2046 + 0x00008000,
2047 +	0x00000000, /* ways 0..15 unlocked. usable = 16 */
2048 +};
2049 +
2050 +u32 way_partition[4] = {
2051 + 0xfffffff0, /* cpu0 */
2052 + 0xffffff0f, /* cpu1 */
2053 + 0xfffff0ff, /* cpu2 */
2054 + 0xffff0fff, /* cpu3 */
2055 +};
2056 +
2057 +u32 way_partitions[9] = {
2058 + 0xffff0003, /* cpu0 A */
2059 + 0xffff0003, /* cpu0 B */
2060 + 0xffff000C, /* cpu1 A */
2061 + 0xffff000C, /* cpu1 B */
2062 + 0xffff0030, /* cpu2 A */
2063 + 0xffff0030, /* cpu2 B */
2064 + 0xffff00C0, /* cpu3 A */
2065 + 0xffff00C0, /* cpu3 B */
2066 + 0xffffff00, /* lv C */
2067 +};
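+
+/*
+ * Example: way_partitions[0] = 0xffff0003 assigns ways 0-1 to CPU0's
+ * level-A tasks. do_partition() below inverts and masks this value, so
+ * 0x0000fffc is written to the lockdown register: every way except 0-1
+ * is locked against allocation.
+ */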
2068 +
2069 +u32 prev_lockdown_d_reg[5] = {
2070 + 0x0000FF00,
2071 + 0x0000FF00,
2072 + 0x0000FF00,
2073 + 0x0000FF00,
2074 + 0x000000FF, /* share with level-C */
2075 +};
2076 +
2077 +u32 prev_lockdown_i_reg[5] = {
2078 + 0x0000FF00,
2079 + 0x0000FF00,
2080 + 0x0000FF00,
2081 + 0x0000FF00,
2082 + 0x000000FF, /* share with level-C */
2083 +};
2084 +
2085 +u32 prev_lbm_i_reg[8] = {
2086 + 0x00000000,
2087 + 0x00000000,
2088 + 0x00000000,
2089 + 0x00000000,
2090 + 0x00000000,
2091 + 0x00000000,
2092 + 0x00000000,
2093 + 0x00000000,
2094 +};
2095 +
2096 +u32 prev_lbm_d_reg[8] = {
2097 + 0x00000000,
2098 + 0x00000000,
2099 + 0x00000000,
2100 + 0x00000000,
2101 + 0x00000000,
2102 + 0x00000000,
2103 + 0x00000000,
2104 + 0x00000000,
2105 +};
2106 +
2107 +static void __iomem *cache_base;
2108 +static void __iomem *lockreg_d;
2109 +static void __iomem *lockreg_i;
2110 +
2111 +static u32 cache_id;
2112 +
2113 +struct mutex actlr_mutex;
2114 +struct mutex l2x0_prefetch_mutex;
2115 +struct mutex lockdown_proc;
2116 +static u32 way_partition_min;
2117 +static u32 way_partition_max;
2118 +
2119 +static int zero = 0;
2120 +static int one = 1;
2121 +
2122 +static int l1_prefetch_proc;
2123 +static int l2_prefetch_hint_proc;
2124 +static int l2_double_linefill_proc;
2125 +static int l2_data_prefetch_proc;
2126 +static int os_isolation;
2127 +static int use_part;
2128 +
2129 +u32 lockdown_reg[9] = {
2130 + 0x00000000,
2131 + 0x00000000,
2132 + 0x00000000,
2133 + 0x00000000,
2134 + 0x00000000,
2135 + 0x00000000,
2136 + 0x00000000,
2137 + 0x00000000,
2138 +};
2139 +
2140 +
2141 +#define ld_d_reg(cpu) ({ int __cpu = cpu; \
2142 + void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
2143 + __cpu * L2X0_LOCKDOWN_STRIDE; __v; })
2144 +#define ld_i_reg(cpu) ({ int __cpu = cpu; \
2145 + void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
2146 + __cpu * L2X0_LOCKDOWN_STRIDE; __v; })
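+
+/*
+ * Example (assuming the standard PL310 register layout: D lockdown at
+ * base + 0x900, I lockdown at base + 0x904, per-CPU stride of 8 bytes):
+ *   ld_d_reg(1) == cache_base + 0x908, ld_i_reg(1) == cache_base + 0x90c.
+ */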
2147 +
2148 +int lock_all;
2149 +int nr_lockregs;
2150 +static raw_spinlock_t cache_lock;
2151 +static raw_spinlock_t prefetch_lock;
2152 +static void ***flusher_pages = NULL;
2153 +
2154 +extern void l2c310_flush_all(void);
2155 +
2156 +static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
2157 +{
2158 + /* wait for cache operation by line or way to complete */
2159 + while (readl_relaxed(reg) & mask)
2160 + cpu_relax();
2161 +}
2162 +
2163 +#ifdef CONFIG_CACHE_L2X0
2164 +static inline void cache_wait(void __iomem *reg, unsigned long mask)
2165 +{
2166 + /* cache operations by line are atomic on PL310 */
2167 +}
2168 +#else
2169 +#define cache_wait cache_wait_way
2170 +#endif
2171 +
2172 +static inline void cache_sync(void)
2173 +{
2174 + void __iomem *base = cache_base;
2175 +
2176 + writel_relaxed(0, base + L2X0_CACHE_SYNC);
2177 + cache_wait(base + L2X0_CACHE_SYNC, 1);
2178 +}
2179 +
2180 +static void print_lockdown_registers(int cpu)
2181 +{
2182 + int i;
2183 + //for (i = 0; i < nr_lockregs; i++) {
2184 + for (i = 0; i < 4; i++) {
2185 + printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
2186 + i, readl_relaxed(ld_d_reg(i)));
2187 + printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
2188 + i, readl_relaxed(ld_i_reg(i)));
2189 + }
2190 +}
2191 +
2192 +static void test_lockdown(void *ignore)
2193 +{
2194 + int i, cpu;
2195 +
2196 + cpu = smp_processor_id();
2197 + printk("Start lockdown test on CPU %d.\n", cpu);
2198 +
2199 + for (i = 0; i < nr_lockregs; i++) {
2200 + printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
2201 + printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
2202 + }
2203 +
2204 + printk("Lockdown initial state:\n");
2205 + print_lockdown_registers(cpu);
2206 + printk("---\n");
2207 +
2208 + for (i = 0; i < nr_lockregs; i++) {
2209 + writel_relaxed(1, ld_d_reg(i));
2210 + writel_relaxed(2, ld_i_reg(i));
2211 + }
2212 + printk("Lockdown all data=1 instr=2:\n");
2213 + print_lockdown_registers(cpu);
2214 + printk("---\n");
2215 +
2216 + for (i = 0; i < nr_lockregs; i++) {
2217 + writel_relaxed((1 << i), ld_d_reg(i));
2218 + writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
2219 + }
2220 + printk("Lockdown varies:\n");
2221 + print_lockdown_registers(cpu);
2222 + printk("---\n");
2223 +
2224 + for (i = 0; i < nr_lockregs; i++) {
2225 + writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
2226 + writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
2227 + }
2228 + printk("Lockdown all zero:\n");
2229 + print_lockdown_registers(cpu);
2230 +
2231 + printk("End lockdown test.\n");
2232 +}
2233 +
2234 +void litmus_setup_lockdown(void __iomem *base, u32 id)
2235 +{
2236 + cache_base = base;
2237 + cache_id = id;
2238 + lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
2239 + lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
2240 +
2241 + if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
2242 + nr_lockregs = 8;
2243 + } else {
2244 + printk("Unknown cache ID!\n");
2245 + nr_lockregs = 1;
2246 + }
2247 +
2248 + mutex_init(&actlr_mutex);
2249 + mutex_init(&l2x0_prefetch_mutex);
2250 + mutex_init(&lockdown_proc);
2251 + raw_spin_lock_init(&cache_lock);
2252 + raw_spin_lock_init(&prefetch_lock);
2253 +
2254 + test_lockdown(NULL);
2255 +}
2256 +
2257 +int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
2258 + size_t *lenp, loff_t *ppos)
2259 +{
2260 + int ret = 0, i;
2261 + unsigned long flags;
2262 +
2263 + mutex_lock(&lockdown_proc);
2264 +
2265 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2266 + if (ret)
2267 + goto out;
2268 +
2269 + if (write) {
2270 + printk("Way-partition settings:\n");
2271 + for (i = 0; i < 9; i++) {
2272 + printk("0x%08X\n", way_partitions[i]);
2273 + }
2274 + for (i = 0; i < 4; i++) {
2275 + writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2276 + i * L2X0_LOCKDOWN_STRIDE);
2277 + writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2278 + i * L2X0_LOCKDOWN_STRIDE);
2279 + }
2280 + }
2281 +
2282 + local_irq_save(flags);
2283 + print_lockdown_registers(smp_processor_id());
2284 + l2c310_flush_all();
2285 + local_irq_restore(flags);
2286 +out:
2287 + mutex_unlock(&lockdown_proc);
2288 + return ret;
2289 +}
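+
+/*
+ * Example (hypothetical value): a write to any CX_LY_way entry reprograms
+ * all four per-CPU lockdown registers from way_partitions[]. Written values
+ * are clamped to [way_partition_min, way_partition_max]:
+ *
+ *   echo 3 > /proc/sys/litmus/C0_LA_way   # ways 0-1 for CPU0, level A
+ */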
2290 +
2291 +int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
2292 + size_t *lenp, loff_t *ppos)
2293 +{
2294 + int ret = 0, i;
2295 + unsigned long flags;
2296 +
2297 + mutex_lock(&lockdown_proc);
2298 +
2299 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2300 + if (ret)
2301 + goto out;
2302 +
2303 + if (write && lock_all == 1) {
2304 + for (i = 0; i < nr_lockregs; i++) {
2305 + writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2306 + i * L2X0_LOCKDOWN_STRIDE);
2307 + writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2308 + i * L2X0_LOCKDOWN_STRIDE);
2309 + }
2310 +/*
2311 + for (i = 0; i < nr_lockregs; i++) {
2312 + barrier();
2313 + mem_lock(LOCK_ALL, i);
2314 + barrier();
2315 + //writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
2316 + //writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
2317 + }
2318 +*/
2319 + }
2320 + if (write && lock_all == 0) {
2321 + for (i = 0; i < nr_lockregs; i++) {
2322 + writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2323 + i * L2X0_LOCKDOWN_STRIDE);
2324 + writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2325 + i * L2X0_LOCKDOWN_STRIDE);
2326 + }
2327 +/*
2328 + for (i = 0; i < nr_lockregs; i++) {
2329 + barrier();
2330 + mem_lock(UNLOCK_ALL, i);
2331 + barrier();
2332 + //writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
2333 + //writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
2334 + }
2335 +*/
2336 + }
2337 + printk("LOCK_ALL HANDLER\n");
2338 + local_irq_save(flags);
2339 + print_lockdown_registers(smp_processor_id());
2340 + l2c310_flush_all();
2341 + local_irq_restore(flags);
2342 +out:
2343 + mutex_unlock(&lockdown_proc);
2344 + return ret;
2345 +}
2346 +
2347 +void cache_lockdown(u32 lock_val, int cpu)
2348 +{
2349 + //unsigned long flags;
2350 + //raw_spin_lock_irqsave(&cache_lock, flags);
2351 +
2352 + __asm__ __volatile__ (
2353 +" str %[lockval], [%[dcachereg]]\n"
2354 +" str %[lockval], [%[icachereg]]\n"
2355 + :
2356 + : [dcachereg] "r" (ld_d_reg(cpu)),
2357 + [icachereg] "r" (ld_i_reg(cpu)),
2358 + [lockval] "r" (lock_val)
2359 + : "cc");
2360 +
2361 + //raw_spin_unlock_irqrestore(&cache_lock, flags);
2362 +}
2363 +
2364 +void do_partition(enum crit_level lv, int cpu)
2365 +{
2366 + u32 regs;
2367 + unsigned long flags;
2368 +
2369 + if (lock_all || !use_part)
2370 + return;
2371 + raw_spin_lock_irqsave(&cache_lock, flags);
2372 + switch(lv) {
2373 + case CRIT_LEVEL_A:
2374 + regs = ~way_partitions[cpu*2];
2375 + regs &= 0x0000ffff;
2376 + break;
2377 + case CRIT_LEVEL_B:
2378 + regs = ~way_partitions[cpu*2+1];
2379 + regs &= 0x0000ffff;
2380 + break;
2381 + case CRIT_LEVEL_C:
2382 + case NUM_CRIT_LEVELS:
2383 + regs = ~way_partitions[8];
2384 + regs &= 0x0000ffff;
2385 + break;
2386 + case MODE_POLL_TASK:
2387 + regs = 0x0000ffff;
2388 + break;
2389 + default:
2390 + BUG();
2391 +
2392 + }
2393 + barrier();
2394 + //cache_lockdown(regs, cpu);
2395 + writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2396 + writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
2397 + barrier();
2398 +
2399 + raw_spin_unlock_irqrestore(&cache_lock, flags);
2400 +
2401 + //flush_cache(0);
2402 +}
2403 +
2404 +void lock_cache(int cpu, u32 val)
2405 +{
2406 + unsigned long flags;
2407 +
2408 + local_irq_save(flags);
2409 + if (val != 0xffffffff) {
2410 + writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2411 + cpu * L2X0_LOCKDOWN_STRIDE);
2412 + writel_relaxed(val, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2413 + cpu * L2X0_LOCKDOWN_STRIDE);
2414 + }
2415 + else {
2416 + int i;
2417 + for (i = 0; i < 4; i++)
2418 + do_partition(CRIT_LEVEL_A, i);
2419 + }
2420 + local_irq_restore(flags);
2421 +}
2422 +
2423 +int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2424 + size_t *lenp, loff_t *ppos)
2425 +{
2426 + int ret = 0;
2427 +
2428 + mutex_lock(&lockdown_proc);
2429 +
2430 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2431 + if (ret)
2432 + goto out;
2433 +
2434 +
2435 + printk("USE_PART HANDLER = %d\n", use_part);
2436 +
2437 +out:
2438 + mutex_unlock(&lockdown_proc);
2439 + return ret;
2440 +}
2441 +
2442 +int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
2443 + size_t *lenp, loff_t *ppos)
2444 +{
2445 + int ret = 0;
2446 +
2447 + mutex_lock(&lockdown_proc);
2448 +
2449 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2450 + if (ret)
2451 + goto out;
2452 +
2453 +
2454 + printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
2455 +
2456 +out:
2457 + mutex_unlock(&lockdown_proc);
2458 + return ret;
2459 +}
2460 +
2461 +int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
2462 + size_t *lenp, loff_t *ppos)
2463 +{
2464 + int ret = 0, i;
2465 +
2466 + mutex_lock(&lockdown_proc);
2467 +
2468 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2469 + if (ret)
2470 + goto out;
2471 +
2472 + if (write) {
2473 + for (i = 0; i < nr_lockregs; i++) {
2474 + writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2475 + i * L2X0_LOCKDOWN_STRIDE);
2476 + writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2477 + i * L2X0_LOCKDOWN_STRIDE);
2478 + }
2479 + }
2480 +
2481 +out:
2482 + mutex_unlock(&lockdown_proc);
2483 + return ret;
2484 +}
2485 +
2486 +int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
2487 + size_t *lenp, loff_t *ppos)
2488 +{
2489 + int ret = 0, i;
2490 +
2491 + mutex_lock(&lockdown_proc);
2492 +
2493 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2494 + if (ret)
2495 + goto out;
2496 +
2497 + if (write) {
2498 + for (i = 0; i < nr_lockregs; i++) {
2499 + writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
2500 + i * L2X0_LOCKDOWN_STRIDE);
2501 + writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
2502 + i * L2X0_LOCKDOWN_STRIDE);
2503 + }
2504 + }
2505 +
2506 +out:
2507 + mutex_unlock(&lockdown_proc);
2508 + return ret;
2509 +}
2510 +
2511 +inline void enter_irq_mode(void)
2512 +{
2513 + int cpu = smp_processor_id();
2514 +
2515 + if (os_isolation == 0)
2516 + return;
2517 +
2518 + prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
2519 + prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
2520 +
2521 + writel_relaxed(way_partitions[8], ld_i_reg(cpu));
2522 + writel_relaxed(way_partitions[8], ld_d_reg(cpu));
2523 +}
2524 +
2525 +inline void exit_irq_mode(void)
2526 +{
2527 + int cpu = smp_processor_id();
2528 +
2529 + if (os_isolation == 0)
2530 + return;
2531 + writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
2532 + writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));
2533 +}
2534 +
2535 +/* Operate on the Cortex-A9's ACTLR register */
2536 +#define ACTLR_L2_PREFETCH_HINT (1 << 1)
2537 +#define ACTLR_L1_PREFETCH (1 << 2)
2538 +
2539 +/*
2540 + * Change the ACTLR.
2541 + * @mode - If 1 (0), set (clear) the bit given in @mask in the ACTLR.
2542 + * @mask - A mask in which one bit is set to operate on the ACTLR.
2543 + */
2544 +static void actlr_change(int mode, int mask)
2545 +{
2546 + u32 orig_value, new_value, reread_value;
2547 +
2548 + if (0 != mode && 1 != mode) {
2549 +		printk(KERN_WARNING "Called %s with a mode other than 0 or 1.\n",
2550 +				__FUNCTION__);
2551 + return;
2552 + }
2553 +
2554 + /* get the original value */
2555 + asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
2556 +
2557 + if (0 == mode)
2558 + new_value = orig_value & ~(mask);
2559 + else
2560 + new_value = orig_value | mask;
2561 +
2562 + asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
2563 + asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
2564 +
2565 + printk("ACTLR: orig: 0x%8x wanted: 0x%8x new: 0x%8x\n",
2566 + orig_value, new_value, reread_value);
2567 +}
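+
+/*
+ * Example: actlr_change(0, ACTLR_L1_PREFETCH) clears ACTLR bit 2 and thus
+ * disables the L1 prefetcher on the CPU executing the handler.
+ */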
2568 +
2569 +int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
2570 + void __user *buffer, size_t *lenp, loff_t *ppos)
2571 +{
2572 + int ret, mode;
2573 +
2574 + mutex_lock(&actlr_mutex);
2575 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2576 +
2577 + if (!ret && write) {
2578 + mode = *((int*)table->data);
2579 + actlr_change(mode, ACTLR_L1_PREFETCH);
2580 + }
2581 + mutex_unlock(&actlr_mutex);
2582 +
2583 + return ret;
2584 +}
2585 +
2586 +int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
2587 + void __user *buffer, size_t *lenp, loff_t *ppos)
2588 +{
2589 + int ret, mode;
2590 +
2591 + mutex_lock(&actlr_mutex);
2592 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2593 + if (!ret && write) {
2594 + mode = *((int*)table->data);
2595 + actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
2596 + }
2597 + mutex_unlock(&actlr_mutex);
2598 +
2599 + return ret;
2600 +}
2601 +
2602 +
2603 +/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
2604 +#define L2X0_PREFETCH_DOUBLE_LINEFILL (1 << 30)
2605 +#define L2X0_PREFETCH_INST_PREFETCH (1 << 29)
2606 +#define L2X0_PREFETCH_DATA_PREFETCH (1 << 28)
2607 +static void l2x0_prefetch_change(int mode, int mask)
2608 +{
2609 + u32 orig_value, new_value, reread_value;
2610 +
2611 + if (0 != mode && 1 != mode) {
2612 +		printk(KERN_WARNING "Called %s with a mode other than 0 or 1.\n",
2613 +				__FUNCTION__);
2614 + return;
2615 + }
2616 +
2617 + orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2618 +
2619 + if (0 == mode)
2620 + new_value = orig_value & ~(mask);
2621 + else
2622 + new_value = orig_value | mask;
2623 +
2624 + writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
2625 + reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
2626 +
2627 + printk("l2x0 prefetch: orig: 0x%8x wanted: 0x%8x new: 0x%8x\n",
2628 + orig_value, new_value, reread_value);
2629 +}
2630 +
2631 +int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
2632 + void __user *buffer, size_t *lenp, loff_t *ppos)
2633 +{
2634 + int ret, mode;
2635 +
2636 + mutex_lock(&l2x0_prefetch_mutex);
2637 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2638 + if (!ret && write) {
2639 + mode = *((int*)table->data);
2640 + l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
2641 + }
2642 + mutex_unlock(&l2x0_prefetch_mutex);
2643 +
2644 + return ret;
2645 +}
2646 +
2647 +int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
2648 + void __user *buffer, size_t *lenp, loff_t *ppos)
2649 +{
2650 + int ret, mode;
2651 +
2652 + mutex_lock(&l2x0_prefetch_mutex);
2653 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2654 + if (!ret && write) {
2655 + mode = *((int*)table->data);
2656 + l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
2657 + }
2658 + mutex_unlock(&l2x0_prefetch_mutex);
2659 +
2660 + return ret;
2661 +}
2662 +
2663 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
2664 + void __user *buffer, size_t *lenp, loff_t *ppos);
2665 +
2666 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
2667 + void __user *buffer, size_t *lenp, loff_t *ppos);
2668 +
2669 +static struct ctl_table cache_table[] =
2670 +{
2671 + {
2672 + .procname = "C0_LA_way",
2673 + .mode = 0666,
2674 + .proc_handler = way_partition_handler,
2675 + .data = &way_partitions[0],
2676 + .maxlen = sizeof(way_partitions[0]),
2677 + .extra1 = &way_partition_min,
2678 + .extra2 = &way_partition_max,
2679 + },
2680 + {
2681 + .procname = "C0_LB_way",
2682 + .mode = 0666,
2683 + .proc_handler = way_partition_handler,
2684 + .data = &way_partitions[1],
2685 + .maxlen = sizeof(way_partitions[1]),
2686 + .extra1 = &way_partition_min,
2687 + .extra2 = &way_partition_max,
2688 + },
2689 + {
2690 + .procname = "C1_LA_way",
2691 + .mode = 0666,
2692 + .proc_handler = way_partition_handler,
2693 + .data = &way_partitions[2],
2694 + .maxlen = sizeof(way_partitions[2]),
2695 + .extra1 = &way_partition_min,
2696 + .extra2 = &way_partition_max,
2697 + },
2698 + {
2699 + .procname = "C1_LB_way",
2700 + .mode = 0666,
2701 + .proc_handler = way_partition_handler,
2702 + .data = &way_partitions[3],
2703 + .maxlen = sizeof(way_partitions[3]),
2704 + .extra1 = &way_partition_min,
2705 + .extra2 = &way_partition_max,
2706 + },
2707 + {
2708 + .procname = "C2_LA_way",
2709 + .mode = 0666,
2710 + .proc_handler = way_partition_handler,
2711 + .data = &way_partitions[4],
2712 + .maxlen = sizeof(way_partitions[4]),
2713 + .extra1 = &way_partition_min,
2714 + .extra2 = &way_partition_max,
2715 + },
2716 + {
2717 + .procname = "C2_LB_way",
2718 + .mode = 0666,
2719 + .proc_handler = way_partition_handler,
2720 + .data = &way_partitions[5],
2721 + .maxlen = sizeof(way_partitions[5]),
2722 + .extra1 = &way_partition_min,
2723 + .extra2 = &way_partition_max,
2724 + },
2725 + {
2726 + .procname = "C3_LA_way",
2727 + .mode = 0666,
2728 + .proc_handler = way_partition_handler,
2729 + .data = &way_partitions[6],
2730 + .maxlen = sizeof(way_partitions[6]),
2731 + .extra1 = &way_partition_min,
2732 + .extra2 = &way_partition_max,
2733 + },
2734 + {
2735 + .procname = "C3_LB_way",
2736 + .mode = 0666,
2737 + .proc_handler = way_partition_handler,
2738 + .data = &way_partitions[7],
2739 + .maxlen = sizeof(way_partitions[7]),
2740 + .extra1 = &way_partition_min,
2741 + .extra2 = &way_partition_max,
2742 + },
2743 + {
2744 + .procname = "Call_LC_way",
2745 + .mode = 0666,
2746 + .proc_handler = way_partition_handler,
2747 + .data = &way_partitions[8],
2748 + .maxlen = sizeof(way_partitions[8]),
2749 + .extra1 = &way_partition_min,
2750 + .extra2 = &way_partition_max,
2751 + },
2752 + {
2753 + .procname = "lock_all",
2754 + .mode = 0666,
2755 + .proc_handler = lock_all_handler,
2756 + .data = &lock_all,
2757 + .maxlen = sizeof(lock_all),
2758 + .extra1 = &zero,
2759 + .extra2 = &one,
2760 + },
2761 + {
2762 + .procname = "l1_prefetch",
2763 + .mode = 0644,
2764 + .proc_handler = litmus_l1_prefetch_proc_handler,
2765 + .data = &l1_prefetch_proc,
2766 + .maxlen = sizeof(l1_prefetch_proc),
2767 + },
2768 + {
2769 + .procname = "l2_prefetch_hint",
2770 + .mode = 0644,
2771 + .proc_handler = litmus_l2_prefetch_hint_proc_handler,
2772 + .data = &l2_prefetch_hint_proc,
2773 + .maxlen = sizeof(l2_prefetch_hint_proc),
2774 + },
2775 + {
2776 + .procname = "l2_double_linefill",
2777 + .mode = 0644,
2778 + .proc_handler = litmus_l2_double_linefill_proc_handler,
2779 + .data = &l2_double_linefill_proc,
2780 + .maxlen = sizeof(l2_double_linefill_proc),
2781 + },
2782 + {
2783 + .procname = "l2_data_prefetch",
2784 + .mode = 0644,
2785 + .proc_handler = litmus_l2_data_prefetch_proc_handler,
2786 + .data = &l2_data_prefetch_proc,
2787 + .maxlen = sizeof(l2_data_prefetch_proc),
2788 + },
2789 + {
2790 + .procname = "os_isolation",
2791 + .mode = 0644,
2792 + .proc_handler = os_isolation_proc_handler,
2793 + .data = &os_isolation,
2794 + .maxlen = sizeof(os_isolation),
2795 + },
2796 + {
2797 + .procname = "use_part",
2798 + .mode = 0644,
2799 + .proc_handler = use_part_proc_handler,
2800 + .data = &use_part,
2801 + .maxlen = sizeof(use_part),
2802 + },
2803 + {
2804 + .procname = "do_perf_test",
2805 + .mode = 0644,
2806 + .proc_handler = do_perf_test_proc_handler,
2807 + },
2808 + {
2809 + .procname = "setup_flusher",
2810 + .mode = 0644,
2811 + .proc_handler = setup_flusher_proc_handler,
2812 + },
2813 + {
2814 + .procname = "lockdown_reg_0",
2815 + .mode = 0644,
2816 + .proc_handler = lockdown_reg_handler,
2817 + .data = &lockdown_reg[0],
2818 + .maxlen = sizeof(lockdown_reg[0]),
2819 + .extra1 = &way_partition_min,
2820 + .extra2 = &way_partition_max,
2821 + },
2822 + {
2823 + .procname = "lockdown_reg_1",
2824 + .mode = 0644,
2825 + .proc_handler = lockdown_reg_handler,
2826 + .data = &lockdown_reg[1],
2827 + .maxlen = sizeof(lockdown_reg[1]),
2828 + .extra1 = &way_partition_min,
2829 + .extra2 = &way_partition_max,
2830 + },
2831 + {
2832 + .procname = "lockdown_reg_2",
2833 + .mode = 0644,
2834 + .proc_handler = lockdown_reg_handler,
2835 + .data = &lockdown_reg[2],
2836 + .maxlen = sizeof(lockdown_reg[2]),
2837 + .extra1 = &way_partition_min,
2838 + .extra2 = &way_partition_max,
2839 + },
2840 + {
2841 + .procname = "lockdown_reg_3",
2842 + .mode = 0644,
2843 + .proc_handler = lockdown_reg_handler,
2844 + .data = &lockdown_reg[3],
2845 + .maxlen = sizeof(lockdown_reg[3]),
2846 + .extra1 = &way_partition_min,
2847 + .extra2 = &way_partition_max,
2848 + },
2849 + {
2850 + .procname = "lockdown_regs",
2851 + .mode = 0644,
2852 + .proc_handler = lockdown_global_handler,
2853 + .data = &lockdown_reg[8],
2854 + .maxlen = sizeof(lockdown_reg[8]),
2855 + .extra1 = &way_partition_min,
2856 + .extra2 = &way_partition_max,
2857 + },
2858 + { }
2859 +};
2860 +
2861 +static struct ctl_table litmus_dir_table[] = {
2862 + {
2863 + .procname = "litmus",
2864 + .mode = 0555,
2865 + .child = cache_table,
2866 + },
2867 + { }
2868 +};
2869 +
2870 +u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
2871 +{
2872 + u32 v = 0;
2873 +
2874 + __asm__ __volatile__ (
2875 +" .align 5\n"
2876 +" str %[lockval], [%[cachereg]]\n"
2877 +"1: ldr %[val], [%[addr]], #32 @ 32 bytes = 1 cache line\n"
2878 +" cmp %[end], %[addr] @ subtracts addr from end\n"
2879 +" bgt 1b\n @ read more, if necessary\n"
2880 + : [addr] "+r" (start),
2881 + [val] "+r" (v)
2882 + : [end] "r" (end),
2883 +#ifdef CONFIG_CACHE_L2X0
2884 + [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2885 +#else
2886 + [cachereg] "r" (lockreg_d),
2887 +#endif
2888 + [lockval] "r" (lock_val)
2889 + : "cc");
2890 +
2891 + return v;
2892 +}
2893 +
2894 +
2895 +/*
2896 + * Prefetch by reading the first word of each cache line in the range.
2897 + *
2898 + * @lock_val: lockdown value written before the reads; controls which ways
2899 + *            allocation may use while prefetching
2900 + * @unlock_val: lockdown value restored once the reads are done
2901 + * @start: start address to be prefetched
2902 + * @end: end address to prefetch (exclusive)
2903 + *
2904 + * Assumes: start < end
2905 + */
2906 +u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
2907 +{
2908 +#ifndef CONFIG_CACHE_L2X0
2909 + unsigned long flags;
2910 +#endif
2911 + u32 v = 0;
2912 +
2913 +#ifndef CONFIG_CACHE_L2X0
2914 + raw_spin_lock_irqsave(&prefetch_lock, flags);
2915 +#endif
2916 +
2917 + __asm__ __volatile__ (
2918 +" .align 5\n"
2919 +" str %[lockval], [%[cachereg]]\n"
2920 +"1: ldr %[val], [%[addr]], #32 @ 32 bytes = 1 cache line\n"
2921 +" cmp %[end], %[addr] @ subtracts addr from end\n"
2922 +" bgt 1b\n @ read more, if necessary\n"
2923 +" str %[unlockval], [%[cachereg]]\n"
2924 + : [addr] "+r" (start),
2925 + [val] "+r" (v)
2926 + : [end] "r" (end),
2927 +#ifdef CONFIG_CACHE_L2X0
2928 + [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
2929 +#else
2930 + [cachereg] "r" (lockreg_d),
2931 +#endif
2932 + [lockval] "r" (lock_val),
2933 + [unlockval] "r" (unlock_val)
2934 + : "cc");
2935 +
2936 +#ifndef CONFIG_CACHE_L2X0
2937 + raw_spin_unlock_irqrestore(&prefetch_lock, flags);
2938 +#endif
2939 +
2940 + return v;
2941 +}
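+
+/*
+ * Example (hypothetical buffer): pull one page into way 0 only, then lock
+ * all ways again, mirroring the pattern used by flush_cache2() below:
+ *
+ *   color_read_in_mem_lock(unlocked_way[0], LOCK_ALL,
+ *                          vaddr, vaddr + PAGE_SIZE);
+ */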
2942 +
2943 +static long update_timeval(struct timespec lhs, struct timespec rhs)
2944 +{
2945 + long val;
2946 + struct timespec ts;
2947 +
2948 + ts = timespec_sub(rhs, lhs);
2949 + val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
2950 +
2951 + return val;
2952 +}
2953 +
2954 +extern void v7_flush_kern_dcache_area(void *, size_t);
2955 +extern void v7_flush_kern_cache_all(void);
2956 +/*
2957 + * Ensure that this page is not in the L1 or L2 cache.
2958 + * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
2959 + * kernel or user vaddr.
2960 + */
2961 +void color_flush_page(void *vaddr, size_t size)
2962 +{
2963 + v7_flush_kern_dcache_area(vaddr, size);
2964 + //v7_flush_kern_cache_all();
2965 +}
2966 +
2967 +extern struct page* get_colored_page(unsigned long color);
2968 +
2969 +int setup_flusher_array(void)
2970 +{
2971 + int color, way, ret = 0;
2972 + struct page *page;
2973 +
2974 + if (flusher_pages != NULL)
2975 + goto out;
2976 +
2977 + flusher_pages = (void***) kmalloc(MAX_NR_WAYS
2978 + * sizeof(*flusher_pages), GFP_KERNEL);
2979 + if (!flusher_pages) {
2980 + printk(KERN_WARNING "No memory for flusher array!\n");
2981 + ret = -EINVAL;
2982 + goto out;
2983 + }
2984 +
2985 + for (way = 0; way < MAX_NR_WAYS; way++) {
2986 + void **flusher_color_arr;
2987 + flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
2988 + * MAX_NR_COLORS, GFP_KERNEL);
2989 + if (!flusher_color_arr) {
2990 + printk(KERN_WARNING "No memory for flusher array!\n");
2991 + ret = -ENOMEM;
2992 + goto out_free;
2993 + }
2994 +
2995 + flusher_pages[way] = flusher_color_arr;
2996 +
2997 + for (color = 0; color < MAX_NR_COLORS; color++) {
2998 + int node;
2999 + switch (color) {
3000 + case 0:
3001 + node = 48;
3002 + break;
3003 + case 1:
3004 + node = 49;
3005 + break;
3006 + case 2:
3007 + node = 50;
3008 + break;
3009 + case 3:
3010 + node = 51;
3011 + break;
3012 + case 4:
3013 + node = 68;
3014 + break;
3015 + case 5:
3016 + node = 69;
3017 + break;
3018 + case 6:
3019 + node = 86;
3020 + break;
3021 + case 7:
3022 + node = 87;
3023 + break;
3024 + case 8:
3025 + node = 88;
3026 + break;
3027 + case 9:
3028 + node = 105;
3029 + break;
3030 + case 10:
3031 + node = 106;
3032 + break;
3033 + case 11:
3034 + node = 107;
3035 + break;
3036 + case 12:
3037 + node = 108;
3038 + break;
3039 + case 13:
3040 + node = 125;
3041 + break;
3042 + case 14:
3043 + node = 126;
3044 + break;
3045 + case 15:
3046 + node = 127;
3047 + break;
3048 + }
3049 + page = get_colored_page(node);
3050 + if (!page) {
3051 + printk(KERN_WARNING "no more colored pages\n");
3052 + ret = -EINVAL;
3053 + goto out_free;
3054 + }
3055 + flusher_pages[way][color] = page_address(page);
3056 + if (!flusher_pages[way][color]) {
3057 + printk(KERN_WARNING "bad page address\n");
3058 + ret = -EINVAL;
3059 + goto out_free;
3060 + }
3061 + }
3062 + }
3063 +out:
3064 + return ret;
3065 +out_free:
3066 + for (way = 0; way < MAX_NR_WAYS; way++) {
3067 + for (color = 0; color < MAX_NR_COLORS; color++) {
3068 + /* not bothering to try and give back colored pages */
3069 + }
3070 + kfree(flusher_pages[way]);
3071 + }
3072 + kfree(flusher_pages);
3073 + flusher_pages = NULL;
3074 + return ret;
3075 +}
3076 +
3077 +void flush_cache2(int all)
3078 +{
3079 + int way, color, cpu;
3080 + unsigned long flags;
3081 +
3082 + raw_spin_lock_irqsave(&cache_lock, flags);
3083 + cpu = raw_smp_processor_id();
3084 +
3085 + prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
3086 + prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
3087 + for (way=0;way<MAX_NR_WAYS;way++) {
3088 + if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
3089 + !all)
3090 + continue;
3091 + for (color=0;color<MAX_NR_COLORS;color++) {
3092 + void *vaddr = flusher_pages[way][color];
3093 + u32 lvalue = unlocked_way[way];
3094 + color_read_in_mem_lock(lvalue, LOCK_ALL,
3095 + vaddr, vaddr + PAGE_SIZE);
3096 + }
3097 +
3098 + }
3099 +
3100 + writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
3101 + writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
3102 + raw_spin_unlock_irqrestore(&cache_lock, flags);
3103 +}
3104 +
3105 +/* src = shared, dst = local */
3106 +#if 0 // random
3107 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
3108 +{
3109 + /* size is in KB */
3110 + long ret = 0;
3111 + lt_t t1, t2;
3112 + int numlines = size * CACHELINES_IN_1KB;
3113 + int next, sum = 0, ran;
3114 + unsigned long flags;
3115 +
3116 + get_random_bytes(&ran, sizeof(int));
3117 + next = ran % ((size*1024)/sizeof(cacheline_t));
3118 +
3119 + //preempt_disable();
3120 + if (type == 1) {
3121 + int i, j;
3122 + color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
3123 + color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
3124 +
3125 + local_irq_save(flags);
3126 + t1 = litmus_clock();
3127 + for (i = 0; i < numlines; i++) {
3128 + next = src[next].line[0];
3129 + for (j = 1; j < INTS_IN_CACHELINE; j++) {
3130 + dst[next].line[j] = src[next].line[j]; // read
3131 + //src[next].line[j] = dst[next].line[j]; // write
3132 + }
3133 + }
3134 + t2 = litmus_clock();
3135 + local_irq_restore(flags);
3136 + sum = next + (int)t2;
3137 + t2 -= t1;
3138 + ret = put_user(t2, ts);
3139 + }
3140 + else {
3141 + int i, j;
3142 + color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
3143 + local_irq_save(flags);
3144 + t1 = litmus_clock();
3145 + for (i = 0; i < numlines; i++) {
3146 + next = src[next].line[0];
3147 + for (j = 1; j < INTS_IN_CACHELINE; j++) {
3148 + dst[next].line[j] = src[next].line[j]; //read
3149 + //src[next].line[j] = dst[next].line[j]; //write
3150 + }
3151 + }
3152 + t2 = litmus_clock();
3153 + local_irq_restore(flags);
3154 + sum = next + (int)t2;
3155 + t2 -= t1;
3156 + ret = put_user(t2, ts);
3157 + v7_flush_kern_dcache_area(src, size*1024);
3158 + }
3159 + //preempt_enable();
3160 + //flush_cache(1);
3161 +
3162 + return ret;
3163 +}
3164 +#else
3165 +// sequential
3166 +asmlinkage long sys_run_test(int type, int size, cacheline_t *src, cacheline_t *dst, lt_t __user *ts)
3167 +{
3168 + /* size is in KB */
3169 + long ret = 0;
3170 + lt_t t1, t2;
3171 + int numlines = size * CACHELINES_IN_1KB;
3172 + int sum = 0;
3173 + unsigned long flags;
3174 +
3175 + //preempt_disable();
3176 + if (type == 1) {
3177 + int i, j;
3178 + color_read_in_mem_lock(0x0000FFF0, 0x0000000f, (void*)src, (void*)src + size*1024);
3179 + color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
3180 +
3181 + local_irq_save(flags);
3182 + t1 = litmus_clock();
3183 + for (i = 0; i < numlines; i++) {
3184 + for (j = 0; j < INTS_IN_CACHELINE; j++) {
3185 + //dst[i].line[j] = src[i].line[j]; // read
3186 + src[i].line[j] = dst[i].line[j]; // write
3187 + }
3188 + }
3189 + t2 = litmus_clock();
3190 + local_irq_restore(flags);
3191 + sum = (int)(t1 + t2);
3192 + t2 -= t1;
3193 + ret = put_user(t2, ts);
3194 + }
3195 + else {
3196 + int i, j;
3197 + color_read_in_mem_lock(0x0000FF0F, 0x0000000f, (void*)dst, (void*)dst + size*1024);
3198 + local_irq_save(flags);
3199 + t1 = litmus_clock();
3200 + for (i = 0; i < numlines; i++) {
3201 + for (j = 0; j < INTS_IN_CACHELINE; j++) {
3202 + //dst[i].line[j] = src[i].line[j]; //read
3203 + src[i].line[j] = dst[i].line[j]; //write
3204 + }
3205 + }
3206 + t2 = litmus_clock();
3207 + local_irq_restore(flags);
3208 + sum = (int)(t1 + t2);
3209 + t2 -= t1;
3210 + ret = put_user(t2, ts);
3211 + v7_flush_kern_dcache_area(src, size*1024);
3212 + }
3213 + //preempt_enable();
3214 + //flush_cache(1);
3215 +
3216 + return ret;
3217 +}
3218 +#endif
3219 +
3220 +/*
3221 + * sys_lock_buffer
3222 + * @vaddr: The address of the buffer.
3223 + * @size: Size of the buffer.
3224 + * @lock_way: way mask used while the buffer is read in; set the bit of the
3225 + *            desired way (masks are inverted before reaching the lockdown register)
3226 + * @unlock_way: way mask restored after the buffer has been read in
3227 + * Returns 0 on success. This system call locks a buffer into the LLC.
3228 + */
3229 +asmlinkage long sys_lock_buffer(void* vaddr, size_t size, u32 lock_way, u32 unlock_way)
3230 +{
3231 + /* size is in bytes */
3232 + long ret = 0;
3233 + u32 lock_val, unlock_val;
3234 +
3235 + lock_val = ~lock_way & 0x0000ffff;
3236 + unlock_val = ~unlock_way & 0x0000ffff;
3237 + color_read_in_mem_lock(lock_val, unlock_val, (void*)vaddr, (void*)vaddr + size);
3238 +
3239 + return ret;
3240 +}
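+
+/*
+ * Userspace sketch (hypothetical; __NR_lock_buffer assumed to match this
+ * kernel's syscall table). Lock `buf` into way 0 and leave all ways locked
+ * afterwards; both masks are inverted before being written to the lockdown
+ * register:
+ *
+ *   syscall(__NR_lock_buffer, buf, len, 0x0001, 0x0000);
+ */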
3241 +
3242 +#define TRIALS 1000
3243 +
3244 +static int perf_test(void) {
3245 + struct timespec before, after;
3246 + struct page *page;
3247 + void *vaddr;
3248 + u32 *data;
3249 + long time, flush_time;
3250 + int i, num_pages = 1;
3251 + unsigned int order = 4;
3252 +
3253 + for (i = 0; i < order; i++) {
3254 + num_pages = num_pages*2;
3255 + }
3256 +
3257 + printk("Number of pages: %d\n", num_pages);
3258 + //page = alloc_page(__GFP_MOVABLE);
3259 + page = alloc_pages(__GFP_MOVABLE, order);
3260 + if (!page) {
3261 + printk(KERN_WARNING "No memory\n");
3262 + return -ENOMEM;
3263 + }
3264 +
3265 + vaddr = page_address(page);
3266 + if (!vaddr)
3267 + printk(KERN_WARNING "%s: vaddr is null\n", __FUNCTION__);
3268 + data = (u32*) vaddr;
3269 +
3270 + getnstimeofday(&before);
3271 + barrier();
3272 + for (i = 0; i < TRIALS; i++) {
3273 + color_flush_page(vaddr, PAGE_SIZE*num_pages);
3274 + }
3275 + barrier();
3276 + getnstimeofday(&after);
3277 + time = update_timeval(before, after);
3278 + printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
3279 + flush_time = time / TRIALS;
3280 +
3281 + color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
3282 +
3283 + barrier();
3284 + getnstimeofday(&before);
3285 + barrier();
3286 + for (i = 0; i < TRIALS; i++) {
3287 + color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
3288 + }
3289 + barrier();
3290 + getnstimeofday(&after);
3291 + time = update_timeval(before, after);
3292 + printk("Average for read from cache: %ld\n", time / TRIALS);
3293 +
3294 + getnstimeofday(&before);
3295 + barrier();
3296 + for (i = 0; i < TRIALS; i++) {
3297 + color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
3298 + color_flush_page(vaddr, PAGE_SIZE*num_pages);
3299 + }
3300 + barrier();
3301 + getnstimeofday(&after);
3302 + time = update_timeval(before, after);
3303 + printk("Average for read from mem: %ld (%ld)\n", time / TRIALS - flush_time, time / TRIALS);
3304 +
3305 + // write in locked way
3306 + color_read_in_mem_lock(nr_unlocked_way[2], LOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
3307 + for (i = 0; i < PAGE_SIZE*num_pages/sizeof(u32); i++) {
3308 + data[i] = i%63353;
3309 + }
3310 + // read
3311 + barrier();
3312 + getnstimeofday(&before);
3313 + barrier();
3314 + for (i = 0; i < TRIALS; i++) {
3315 + color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
3316 + }
3317 + barrier();
3318 + getnstimeofday(&after);
3319 + time = update_timeval(before, after);
3320 + printk("Average for read in after write: %ld\n", time / TRIALS);
3321 +
3322 +
3323 + //free_page((unsigned long)vaddr);
3324 + free_pages((unsigned long)vaddr, order);
3325 +
3326 + return 0;
3327 +}
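+
+/*
+ * The test is triggered from a shell (assuming the litmus sysctl directory
+ * is at /proc/sys/litmus); any write starts one run and the averages are
+ * reported via printk:
+ *
+ *   echo 1 > /proc/sys/litmus/do_perf_test
+ */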
3328 +
3329 +int do_perf_test_proc_handler(struct ctl_table *table, int write,
3330 + void __user *buffer, size_t *lenp, loff_t *ppos)
3331 +{
3332 + int ret = 0;
3333 +
3334 + if (write) {
3335 + ret = perf_test();
3336 + }
3337 +
3338 + return ret;
3339 +}
3340 +
3341 +int setup_flusher_proc_handler(struct ctl_table *table, int write,
3342 + void __user *buffer, size_t *lenp, loff_t *ppos)
3343 +{
3344 + int ret = -EINVAL;
3345 +
3346 + if (write && flusher_pages == NULL) {
3347 + ret = setup_flusher_array();
3348 + printk(KERN_INFO "setup flusher return: %d\n", ret);
3349 +
3350 + }
3351 + else if (flusher_pages) {
3352 + printk(KERN_INFO "flusher_pages is already set!\n");
3353 + ret = 0;
3354 + }
3355 +
3356 + return ret;
3357 +}
3358 +
3359 +static struct ctl_table_header *litmus_sysctls;
3360 +
3361 +static int __init litmus_sysctl_init(void)
3362 +{
3363 + int ret = 0;
3364 +
3365 + printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n");
3366 + litmus_sysctls = register_sysctl_table(litmus_dir_table);
3367 + if (!litmus_sysctls) {
3368 + printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n");
3369 + ret = -EFAULT;
3370 + goto out;
3371 + }
3372 +
3373 + way_partition_min = 0x00000000;
3374 + way_partition_max = 0x0000FFFF;
3375 +
3376 +out:
3377 + return ret;
3378 +}
3379 +
3380 +module_init(litmus_sysctl_init);
3381 diff --git litmus/color_shm.c litmus/color_shm.c
3382 new file mode 100644
3383 index 0000000..638c6a6
3384 --- /dev/null
3385 +++ litmus/color_shm.c
3386 @@ -0,0 +1,401 @@
3387 +#include <linux/sched.h>
3388 +#include <linux/mm.h>
3389 +#include <linux/fs.h>
3390 +#include <linux/miscdevice.h>
3391 +#include <linux/spinlock.h>
3392 +#include <linux/module.h>
3393 +#include <linux/highmem.h>
3394 +#include <linux/slab.h>
3395 +#include <linux/mutex.h>
3396 +#include <asm/uaccess.h>
3397 +
3398 +#include <litmus/litmus.h>
3399 +
3400 +#define DEV_NAME "litmus/color_shm"
3401 +
3402 +/* Major number assigned to our device.
3403 + * See Documentation/devices.txt */
3404 +#define SHM_MAJOR 240
3405 +#define MAX_COLORED_PAGE 256
3406 +#define NUM_BANKS 8
3407 +#define NUM_COLORS 16
3408 +
3409 +static struct mutex dev_lock;
3410 +static int bypass_cache;
3411 +
3412 +struct color_ioctl_cmd {
3413 + unsigned int color;
3414 + unsigned int bank;
3415 +};
3416 +
3417 +struct color_ioctl_offset {
3418 + unsigned long offset;
3419 + int lock;
3420 +};
3421 +
3422 +#define SET_COLOR_SHM_CMD _IOW(SHM_MAJOR, 0x1, struct color_ioctl_cmd)
3423 +#define SET_COLOR_SHM_OFFSET _IOW(SHM_MAJOR, 0x2, struct color_ioctl_offset)
3424 +
3425 +struct color_ioctl_cmd color_param;
3426 +struct color_ioctl_offset color_offset;
3427 +
3428 +static int mmap_common_checks(struct vm_area_struct *vma)
3429 +{
3430 + /* you can only map the "first" page */
3431 + if (vma->vm_pgoff != 0)
3432 + return -EINVAL;
3433 +
3434 + return 0;
3435 +}
3436 +
3437 +static void mmap_common_vma_flags(struct vm_area_struct *vma)
3438 +{
3439 + /* This mapping should not be kept across forks,
3440 + * cannot be expanded, and is not a "normal" page. */
3441 + //vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_SHARED | VM_MAYSHARE;
3442 + vma->vm_flags |= VM_SHARED | VM_MAYSHARE | VM_LOCKED;
3443 +
3444 + /* We don't want the first write access to trigger a "minor" page fault
3445 + * to mark the page as dirty. This is transient, private memory, we
3446 + * don't care if it was touched or not. __S011 means RW access, but not
3447 + * execute, and avoids copy-on-write behavior.
3448 + * See protection_map in mmap.c. */
3449 + vma->vm_page_prot = PAGE_SHARED;
3450 +}
3451 +
3452 +#define vma_nr_pages(vma) \
3453 + ({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
3454 +
3455 +extern struct page* get_colored_page(unsigned long color);
3456 +
3457 +static int do_map_colored_page(struct vm_area_struct *vma,
3458 + const unsigned long addr,
3459 + const unsigned long color_no)
3460 +{
3461 + int err = 0;
3462 +
3463 + struct page *page = get_colored_page(color_no);
3464 +
3465 + if (!page) {
3466 + printk(KERN_INFO "Could not get page with color %lu.\n",
3467 + color_no);
3468 + err = -ENOMEM;
3469 + goto out;
3470 + }
3471 +
3472 + printk(KERN_INFO "vma: %p addr: 0x%lx color_no: %lu\n",
3473 + vma, addr, color_no);
3474 +
3475 + printk(KERN_INFO "vm_start: %lu vm_end: %lu\n",
3476 + vma->vm_start, vma->vm_end);
3477 +
3478 + printk(KERN_INFO "inserting page (pa: 0x%x) at vaddr: 0x%lx "
3479 +		"flags: 0x%lx prot: 0x%lx\n",
3480 + page_to_phys(page), addr,
3481 + vma->vm_flags, pgprot_val(vma->vm_page_prot));
3482 +
3483 +
3484 + err = vm_insert_page(vma, addr, page);
3485 + if (err) {
3486 + printk(KERN_INFO "vm_insert_page() failed (%d)\n", err);
3487 + err = -EINVAL;
3488 + goto out;
3489 + }
3490 +out:
3491 + return err;
3492 +}
3493 +
3494 +static int do_map_colored_pages(struct vm_area_struct *vma)
3495 +{
3496 + const unsigned long nr_pages = vma_nr_pages(vma);
3497 + unsigned long nr_mapped;
3498 + int i, start_bank = -1, start_color = -1;
3499 + int cur_bank = -1, cur_color = -1, err = 0;
3500 + int colors[16] = {0}, banks[8] = {0};
3501 +
3502 + if (bypass_cache == 1)
3503 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3504 +
3505 + for (i = 0; i < NUM_BANKS; i++) {
3506 + if (((color_param.bank >> i)&0x1) == 1)
3507 + banks[i] = 1;
3508 + }
3509 +
3510 + for (i = 0; i < NUM_COLORS; i++) {
3511 + if (((color_param.color >> i)&0x1) == 1)
3512 + colors[i] = 1;
3513 + }
3514 +
3515 + for (i = 0; i < NUM_BANKS; i++) {
3516 + if (banks[i] == 1) {
3517 + start_bank = i;
3518 + break;
3519 + }
3520 + }
3521 + for (i = 0; i < NUM_COLORS; i++) {
3522 + if (colors[i] == 1) {
3523 + start_color = i;
3524 + break;
3525 + }
3526 + }
3527 +
3528 + cur_bank = start_bank;
3529 + cur_color = start_color;
3530 +
3531 + for (i = 0; i < NUM_BANKS; i++) {
3532 + printk(KERN_INFO "BANK[%d] = %d\n", i, banks[i]);
3533 + }
3534 + printk(KERN_INFO "cur_bank = %d\n", cur_bank);
3535 + for (i = 0; i < NUM_COLORS; i++) {
3536 + printk(KERN_INFO "COLOR[%d] = %d\n", i, colors[i]);
3537 + }
3538 + printk(KERN_INFO "cur_color = %d\n", cur_color);
3539 +
3540 +
3541 + TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
3542 + nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
3543 +
3544 + for (nr_mapped = 0; nr_mapped < nr_pages; nr_mapped++) {
3545 + const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
3546 + const unsigned long color_no = cur_bank*NUM_COLORS + cur_color;
3547 +
3548 + err = do_map_colored_page(vma, addr, color_no);
3549 + printk(KERN_INFO "mapped bank[%d], color[%d], color_no = %lu at 0x%lx\n",
3550 + cur_bank, cur_color, color_no, addr);
3551 + if (err) {
3552 + TRACE_CUR("Could not map colored page set.\n");
3553 + err = -EINVAL;
3554 + goto out;
3555 + }
3556 + do {
3557 + cur_color++;
3558 +		} while (cur_color < NUM_COLORS && colors[cur_color] == 0);
3559 +
3560 + if (cur_color >= NUM_COLORS) {
3561 + do {
3562 + cur_bank++;
3563 +			} while (cur_bank < NUM_BANKS && banks[cur_bank] == 0);
3564 + cur_color = start_color;
3565 + }
3566 +
3567 + if (cur_bank >= NUM_BANKS) {
3568 + cur_bank = start_bank;
3569 + }
3570 + }
3571 + TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
3572 + out:
3573 + return err;
3574 +}
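+
+/*
+ * Example: color = 0x3, bank = 0x1 selects colors {0,1} in bank 0, so
+ * successive pages are mapped with color_no 0, 1, 0, 1, ... wrapping
+ * around within the selected sets.
+ */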
3575 +
3576 +static int map_colored_pages(struct vm_area_struct *vma)
3577 +{
3578 + int err = 0;
3579 +
3580 + printk(KERN_INFO "User requests %lu pages.\n", vma_nr_pages(vma));
3581 + if (MAX_COLORED_PAGE < vma_nr_pages(vma)) {
3582 +		TRACE_CUR("Max page request %d but want %lu.\n",
3583 + MAX_COLORED_PAGE, vma_nr_pages(vma));
3584 + err = -EINVAL;
3585 + goto out;
3586 + }
3587 + err = do_map_colored_pages(vma);
3588 +out:
3589 + return err;
3590 +}
3591 +
3592 +static void litmus_color_shm_vm_close(struct vm_area_struct *vma)
3593 +{
3594 +
3595 + TRACE_CUR("flags=0x%lx prot=0x%lx\n",
3596 + vma->vm_flags, pgprot_val(vma->vm_page_prot));
3597 +
3598 + TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
3599 + (void*) vma->vm_start, (void*) vma->vm_end, vma,
3600 + vma->vm_private_data);
3601 +
3602 +}
3603 +
3604 +static int litmus_color_shm_vm_fault(struct vm_area_struct *vma,
3605 + struct vm_fault *vmf)
3606 +{
3607 + /* This function should never be called, since
3608 + * all pages should have been mapped by mmap()
3609 + * already. */
3610 + TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
3611 + printk(KERN_INFO "flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
3612 +
3613 +	printk(KERN_INFO "Page fault in color ctrl page! prot=0x%lx\n", pgprot_val(vma->vm_page_prot));
3614 +
3615 + return VM_FAULT_SIGBUS;
3616 +}
3617 +
3618 +static struct vm_operations_struct litmus_color_shm_vm_ops = {
3619 + .close = litmus_color_shm_vm_close,
3620 + .fault = litmus_color_shm_vm_fault,
3621 +};
3622 +
3623 +static int litmus_color_shm_mmap(struct file *filp, struct vm_area_struct *vma)
3624 +{
3625 + int err = 0;
3626 +
3627 + printk(KERN_INFO "mmap called\n");
3628 +
3629 + if (color_param.color == 0x00000000 || color_param.bank == 0x00000000) {
3630 + printk(KERN_INFO "color_info not set.\n");
3631 + return -EINVAL;
3632 + }
3633 + if (color_offset.offset == 0xffffffff || color_offset.lock == -1) {
3634 + printk(KERN_INFO "color_offset not set.\n");
3635 + return -EINVAL;
3636 + }
3637 +
3638 + err = mmap_common_checks(vma);
3639 + if (err) {
3640 + TRACE_CUR("failed mmap common checks\n");
3641 + goto out;
3642 + }
3643 +
3644 + vma->vm_ops = &litmus_color_shm_vm_ops;
3645 + mmap_common_vma_flags(vma);
3646 +
3647 + err = map_colored_pages(vma);
3648 +
3649 + TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
3650 + pgprot_val(vma->vm_page_prot));
3651 +out:
3652 + color_param.color = 0x00000000;
3653 + color_param.bank = 0x00000000;
3654 + color_offset.offset = 0xffffffff;
3655 + color_offset.lock = -1;
3656 +
3657 + return err;
3658 +
3659 +}
3660 +
3661 +static long litmus_color_shm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3662 +{
3663 + long err = -ENOIOCTLCMD;
3664 + struct color_ioctl_cmd color_info;
3665 + struct color_ioctl_offset color_off;
3666 +
3667 + printk(KERN_INFO "color_shm ioctl\n");
3668 +
3669 + if (_IOC_TYPE(cmd) != SHM_MAJOR)
3670 + return -ENOTTY;
3671 +
3672 +
3673 + switch (cmd) {
3674 + case SET_COLOR_SHM_CMD:
3675 +
3676 +		err = copy_from_user(&color_info, (void*)arg, sizeof(struct color_ioctl_cmd));
+		if (err)
+			return -EFAULT;
3677 +
3678 + color_param.color = color_info.color;
3679 + color_param.bank = color_info.bank;
3680 + printk(KERN_INFO "COLOR = %x\n", color_param.color);
3681 + printk(KERN_INFO "BANK = %x\n", color_param.bank);
3682 + err = 0;
3683 + break;
3684 + case SET_COLOR_SHM_OFFSET:
3685 +		err = copy_from_user(&color_off, (void*)arg, sizeof(struct color_ioctl_offset));
+		if (err)
+			return -EFAULT;
3686 +
3687 + color_offset.offset = color_off.offset;
3688 + color_offset.lock = color_off.lock;
3689 + printk(KERN_INFO "OFFSET = %lx\n", color_offset.offset);
3690 + printk(KERN_INFO "LOCK = %d\n", color_offset.lock);
3691 + err = 0;
3692 + break;
3693 +
3694 + default:
3695 + printk(KERN_INFO "Invalid IOCTL CMD\n");
3696 + err = -EINVAL;
3697 + }
3698 +
3699 + return err;
3700 +}
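+
+/*
+ * Userspace sketch (hypothetical): configure colors/banks via ioctl(),
+ * then mmap() the device to receive the colored pages:
+ *
+ *   struct color_ioctl_cmd c = { .color = 0x3, .bank = 0x1 };
+ *   struct color_ioctl_offset o = { .offset = 0, .lock = 0 };
+ *   int fd = open("/dev/litmus/color_shm", O_RDWR);
+ *   ioctl(fd, SET_COLOR_SHM_CMD, &c);
+ *   ioctl(fd, SET_COLOR_SHM_OFFSET, &o);
+ *   void *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
+ *                  MAP_SHARED, fd, 0);
+ */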
3701 +
3702 +static struct file_operations litmus_color_shm_fops = {
3703 + .owner = THIS_MODULE,
3704 + .mmap = litmus_color_shm_mmap,
3705 + .unlocked_ioctl = litmus_color_shm_ioctl,
3706 +};
3707 +
3708 +static struct miscdevice litmus_color_shm_dev = {
3709 + .name = DEV_NAME,
3710 + .minor = MISC_DYNAMIC_MINOR,
3711 + .fops = &litmus_color_shm_fops,
3712 +};
3713 +
3714 +struct mutex bypass_mutex;
3715 +
3716 +int bypass_proc_handler(struct ctl_table *table, int write,
3717 + void __user *buffer, size_t *lenp, loff_t *ppos)
3718 +{
3719 + int ret;
3720 +
3721 + mutex_lock(&bypass_mutex);
3722 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
3723 + printk(KERN_INFO "shm_bypass = %d\n", bypass_cache);
3724 + mutex_unlock(&bypass_mutex);
3725 +
3726 + return ret;
3727 +}
3728 +
3729 +static int zero = 0;
3730 +static int one = 1;
3731 +
3732 +static struct ctl_table cache_table[] =
3733 +{
3734 + {
3735 + .procname = "shm_bypass",
3736 + .mode = 0666,
3737 + .proc_handler = bypass_proc_handler,
3738 + .data = &bypass_cache,
3739 + .maxlen = sizeof(bypass_cache),
3740 + .extra1 = &zero,
3741 + .extra2 = &one,
3742 + },
3743 + { }
3744 +};
3745 +
3746 +static struct ctl_table litmus_dir_table[] = {
3747 + {
3748 + .procname = "litmus",
3749 + .mode = 0555,
3750 + .child = cache_table,
3751 + },
3752 + { }
3753 +};
3754 +
3755 +static struct ctl_table_header *litmus_sysctls;
3756 +
3757 +static int __init init_color_shm_devices(void)
3758 +{
3759 + int err;
3760 +
3761 + printk(KERN_INFO "Registering LITMUS^RT color_shm devices.\n");
3762 + litmus_sysctls = register_sysctl_table(litmus_dir_table);
3763 + if (!litmus_sysctls) {
3764 + printk(KERN_WARNING "Could not register LITMUS^RT color_shm sysctl.\n");
3765 + err = -EFAULT;
3766 + }
3767 +
3768 + mutex_init(&dev_lock);
3769 + mutex_init(&bypass_mutex);
3770 + color_param.color = 0x00000000;
3771 + color_param.bank = 0x00000000;
3772 + color_offset.offset = 0xffffffff;
3773 + color_offset.lock = -1;
3774 + bypass_cache = 0;
3775 + err = misc_register(&litmus_color_shm_dev);
3776 +
3777 + return err;
3778 +}
3779 +
3780 +static void __exit exit_color_shm_devices(void)
3781 +{
3782 + misc_deregister(&litmus_color_shm_dev);
3783 + printk(KERN_INFO "Unregistering %s device.\n", DEV_NAME);
3784 +}
3785 +
3786 +module_init(init_color_shm_devices);
3787 +module_exit(exit_color_shm_devices);
3788 \ No newline at end of file
3789 diff --git litmus/jobs.c litmus/jobs.c
3790 index 0dd36b9..368e0b3 100644
3791 --- litmus/jobs.c
3792 +++ litmus/jobs.c
3793 @@ -8,6 +8,9 @@
3794 #include <litmus/sched_plugin.h>
3795 #include <litmus/jobs.h>
3796
3797 +extern int num_sync_released;
3798 +int n_init_phase;
3799 +
3800 static inline void setup_release(struct task_struct *t, lt_t release)
3801 {
3802 /* prepare next release */
3803 @@ -19,6 +22,8 @@ static inline void setup_release(struct task_struct *t, lt_t release)
3804 t->rt_param.job_params.job_no++;
3805 }
3806
3807 +#define INIT_PHASE_LENGTH_NS (1000000000)
3808 +
3809 void prepare_for_next_period(struct task_struct *t)
3810 {
3811 BUG_ON(!t);
3812 @@ -30,12 +35,13 @@ void prepare_for_next_period(struct task_struct *t)
3813 (long long)litmus_clock() -
3814 (long long)t->rt_param.job_params.deadline;
3815
3816 +	/* Mode 0 is used for initialization.
3817 +	 * Use sporadic releases for all tasks so as not to over-utilize CPUs in mode 0. */
3818 if (tsk_rt(t)->sporadic_release) {
3819 TRACE_TASK(t, "sporadic release at %llu\n",
3820 - tsk_rt(t)->sporadic_release_time);
3821 + tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
3822 /* sporadic release */
3823 - setup_release(t, tsk_rt(t)->sporadic_release_time);
3824 - tsk_rt(t)->sporadic_release = 0;
3825 + setup_release(t, tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no));
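+ /* Example: with INIT_PHASE_LENGTH_NS = 1 s, job 0 is released at
+ * sporadic_release_time, job 1 one second later, job 2 two seconds
+ * later, staggering releases during the mode-0 init phase. */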
3826 } else {
3827 /* periodic release => add period */
3828 setup_release(t, get_release(t) + get_rt_period(t));
3829 diff --git litmus/litmus.c litmus/litmus.c
3830 index db5ce0e9..400fd14 100644
3831 --- litmus/litmus.c
3832 +++ litmus/litmus.c
3833 @@ -14,6 +14,10 @@
3834 #include <linux/sched/rt.h>
3835 #include <linux/rwsem.h>
3836 #include <linux/interrupt.h>
3837 +#include <linux/migrate.h>
3838 +#include <linux/mm.h>
3839 +#include <linux/memcontrol.h>
3840 +#include <linux/mm_inline.h>
3841
3842 #include <litmus/litmus.h>
3843 #include <litmus/bheap.h>
3844 @@ -21,6 +25,8 @@
3845 #include <litmus/rt_domain.h>
3846 #include <litmus/litmus_proc.h>
3847 #include <litmus/sched_trace.h>
3848 +#include <litmus/cache_proc.h>
3849 +#include <litmus/mc2_common.h>
3850
3851 #ifdef CONFIG_SCHED_CPU_AFFINITY
3852 #include <litmus/affinity.h>
3853 @@ -31,6 +37,8 @@
3854 #include <trace/events/litmus.h>
3855 #endif
3856
3857 +extern void l2c310_flush_all(void);
3858 +
3859 /* Number of RT tasks that exist in the system */
3860 atomic_t rt_task_count = ATOMIC_INIT(0);
3861
3862 @@ -160,6 +168,14 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
3863 pid, tp.budget_policy);
3864 goto out_unlock;
3865 }
3866 +#ifdef CONFIG_PGMRT_SUPPORT
3867 + if (tp.pgm_type < PGM_NOT_A_NODE || tp.pgm_type > PGM_INTERNAL) {
3868 + printk(KERN_INFO "litmus: real-time task %d rejected "
3869 + "because of unknown PGM node type specified (%d)\n",
3870 + pid, tp.pgm_type);
3871 + goto out_unlock;
3872 + }
3873 +#endif
3874
3875 target->rt_param.task_params = tp;
3876
3877 @@ -314,6 +330,149 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
3878 return ret;
3879 }
3880
3881 +asmlinkage long sys_reservation_create(int type, void __user *config)
3882 +{
3883 + return litmus->reservation_create(type, config);
3884 +}
3885 +
3886 +asmlinkage long sys_reservation_destroy(unsigned int reservation_id, int cpu)
3887 +{
3888 + return litmus->reservation_destroy(reservation_id, cpu);
3889 +}
3890 +
3891 +static unsigned long color_mask;
3892 +
3893 +static inline unsigned long page_color(struct page *page)
3894 +{
3895 + return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT);
3896 +}
3897 +
3898 +extern int isolate_lru_page(struct page *page);
3899 +extern void putback_movable_page(struct page *page);
3900 +extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
3901 +
3902 +/*
3903 + * sys_set_page_color
3904 + * @cpu: CPU number to assign page colors.
3905 + * Syscall for recoloring a task's pages.
3906 + * Returns -1 on error.
3907 + * N on success, where N is the number of pages that could not
3908 + * be moved. A return of zero means that all pages were
3909 + * successfully moved. Currently, two pages can never be moved:
3910 + * the signal-handler page and the LITMUS^RT control page.
3911 + *
3912 + * Only MC^2 tasks may be configured with this system call.
3913 + * Use static linking to isolate all pages.
3914 + */
3915 +asmlinkage long sys_set_page_color(int cpu)
3916 +{
3917 + long ret = 0;
3918 + struct vm_area_struct *vma_itr = NULL;
3919 + int nr_pages = 0, nr_failed = 0, nr_not_migrated = 0;
3920 + unsigned long node;
3921 + enum crit_level lv;
3922 + struct mm_struct *mm;
3923 +
3924 + LIST_HEAD(pagelist);
3925 + LIST_HEAD(shared_pagelist);
3926 +
3927 + migrate_prep();
3928 +
3929 + rcu_read_lock();
3930 + get_task_struct(current);
3931 + rcu_read_unlock();
3932 + mm = get_task_mm(current);
3933 + put_task_struct(current);
3934 +
3935 + down_read(&mm->mmap_sem);
3936 + vma_itr = mm->mmap;
3937 + while (vma_itr != NULL) {
3938 + unsigned int num_pages = 0, i;
3939 + struct page *old_page = NULL;
3940 +
3941 + num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
3942 + for (i = 0; i < num_pages; i++) {
3943 + old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
3944 +
3945 + if (IS_ERR(old_page))
3946 + continue;
3947 + if (!old_page)
3948 + continue;
3949 +
3950 + if (PageReserved(old_page)) {
3951 + TRACE("Reserved Page!\n");
3952 + put_page(old_page);
3953 + continue;
3954 + }
3955 + ret = isolate_lru_page(old_page);
3956 + if (!ret) {
3957 + list_add_tail(&old_page->lru, &pagelist);
3958 + inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
3959 + nr_pages++;
3960 + } else {
3961 + nr_failed++;
3962 + }
3963 + put_page(old_page);
3964 + }
3965 +
3966 + vma_itr = vma_itr->vm_next;
3967 + }
3968 +
3969 + ret = 0;
3970 + if (tsk_rt(current)->mc2_data)
3971 + lv = tsk_rt(current)->mc2_data->crit;
3972 + else
3973 + BUG(); //lv = 0;
3974 +
3975 + if (cpu == -1)
3976 + node = 8;
3977 + else
3978 + node = cpu*2 + lv;
3979 +
3980 + if (!list_empty(&pagelist)) {
3981 + ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
3982 + TRACE_TASK(current, "%ld pages not migrated.\n", ret);
3983 + nr_not_migrated = ret;
3984 + if (ret) {
3985 + putback_movable_pages(&pagelist);
3986 + }
3987 + }
3988 +
3989 + up_read(&mm->mmap_sem);
3990 +
3991 + TRACE_TASK(current, "node = %ld, nr_migrated_pages = %d, nr_pages = %d nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_pages, nr_failed);
3992 +
3993 + return ret;
3994 +}
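+
+/* Illustrative userspace sketch (not part of this patch): a task would
+ * typically recolor its pages right before becoming a real-time task.
+ * __NR_set_page_color is the number assigned in the arch syscall tables.
+ * With the mapping above, cpu == -1 yields node 8, and cpu = 1 at level B
+ * (assuming CRIT_LEVEL_B == 1) yields node 3.
+ *
+ *   #include <unistd.h>
+ *   #include <sys/syscall.h>
+ *
+ *   long recolor_to_cpu(int cpu)
+ *   {
+ *       return syscall(__NR_set_page_color, cpu);
+ *   }
+ */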
3995 +
3996 +/* sys_test_call() is a test system call for debugging */
3997 +asmlinkage long sys_test_call(unsigned int param)
3998 +{
3999 + long ret = 0;
4000 + struct vm_area_struct *vma_itr = NULL;
4001 +
4002 + TRACE_CUR("test_call param = %d\n", param);
4003 +
4004 + if (param == 0) {
4005 + /* Print page information */
4006 + down_read(&current->mm->mmap_sem);
4007 + vma_itr = current->mm->mmap;
4008 + while (vma_itr != NULL) {
4009 + printk(KERN_INFO "--------------------------------------------\n");
4010 + printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
4011 + printk(KERN_INFO "vm_end : %lx\n", vma_itr->vm_end);
4012 + printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
4013 + printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
4014 + printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
4015 + vma_itr = vma_itr->vm_next;
4016 + }
4017 + printk(KERN_INFO "--------------------------------------------\n");
4018 + up_read(&current->mm->mmap_sem);
4019 + }
4020 +
4021 + return ret;
4022 +}
4023 +
4024 /* p is a real-time task. Re-init its state as a best-effort task. */
4025 static void reinit_litmus_state(struct task_struct* p, int restore)
4026 {
4027 @@ -651,6 +810,12 @@ static int __init _init_litmus(void)
4028 * mode change lock is used to enforce single mode change
4029 * operation.
4030 */
4031 +#if defined(CONFIG_CPU_V7)
4032 + unsigned int line_size_log = 5; // 2^5 = 32 bytes
4033 + unsigned int cache_info_sets = 2048; // 64KB (way_size) / 32B (line_size) = 2048
4034 + printk("LITMUS^RT-ARM kernel\n");
4035 +#endif
4036 +
4037 printk("Starting LITMUS^RT kernel\n");
4038
4039 register_sched_plugin(&linux_sched_plugin);
4040 @@ -665,11 +830,15 @@ static int __init _init_litmus(void)
4041 else
4042 printk("Could not register kill rt tasks magic sysrq.\n");
4043 #endif
4044 -
4045 init_litmus_proc();
4046
4047 register_reboot_notifier(&shutdown_notifier);
4048
4049 +#if defined(CONFIG_CPU_V7)
4050 + color_mask = ((cache_info_sets << line_size_log) - 1) ^ (PAGE_SIZE - 1);
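+ /* Worked example (assuming 4 KiB pages): way_size = 2048 sets * 32 B
+ * = 64 KiB, so color_mask = 0xffff ^ 0x0fff = 0xf000, i.e. physical
+ * address bits 12-15 select one of 16 page colors. */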
4051 + printk("Page color mask %lx\n", color_mask);
4052 +#endif
4053 +
4054 return 0;
4055 }
4056
4057 diff --git litmus/mc2_common.c litmus/mc2_common.c
4058 new file mode 100644
4059 index 0000000..322cc6d
4060 --- /dev/null
4061 +++ litmus/mc2_common.c
4062 @@ -0,0 +1,80 @@
4063 +/*
4064 + * litmus/mc2_common.c
4065 + *
4066 + * Common functions for MC2 plugin.
4067 + */
4068 +
4069 +#include <linux/percpu.h>
4070 +#include <linux/sched.h>
4071 +#include <linux/list.h>
4072 +#include <linux/slab.h>
4073 +#include <asm/uaccess.h>
4074 +
4075 +#include <litmus/litmus.h>
4076 +#include <litmus/sched_plugin.h>
4077 +#include <litmus/sched_trace.h>
4078 +
4079 +#include <litmus/mc2_common.h>
4080 +
4081 +long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, struct reservation *res)
4082 +{
4083 + task_client_init(tc, tsk, res);
4084 + if ((mc2_param->crit < CRIT_LEVEL_A) ||
4085 + (mc2_param->crit > CRIT_LEVEL_C))
4086 + return -EINVAL;
4087 +
4088 + TRACE_TASK(tsk, "mc2_task_client_init: mode = %d, crit_level = %d\n", res->mode, mc2_param->crit);
4089 +
4090 + return 0;
4091 +}
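+
+/* Illustrative sketch (assumptions flagged): userspace fills in a
+ * struct mc2_task and passes it to sys_set_mc2_task_param() below before
+ * admitting the task as real-time. Field names follow this patch; the
+ * mode_mask value is only an example.
+ *
+ *   struct mc2_task mp = {
+ *       .crit = CRIT_LEVEL_C,
+ *       .mode_mask = 0x3,   // eligible in modes 0 and 1
+ *   };
+ *   syscall(__NR_set_mc2_task_param, getpid(), &mp);
+ */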
4092 +
4093 +asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
4094 +{
4095 + struct task_struct *target;
4096 + int retval = -EINVAL;
4097 + struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
4098 +
4099 + if (!mp)
4100 + return -ENOMEM;
4101 +
4102 + printk("Setting up mc^2 task parameters for process %d.\n", pid);
4103 +
4104 + if (pid < 0 || param == 0) {
4105 + goto out;
4106 + }
4107 + if (copy_from_user(mp, param, sizeof(*mp))) {
4108 + retval = -EFAULT;
4109 + goto out;
4110 + }
4111 +
4112 + /* Task search and manipulation must be protected */
4113 + read_lock_irq(&tasklist_lock);
4114 + if (!(target = find_task_by_vpid(pid))) {
4115 + retval = -ESRCH;
4116 + goto out_unlock;
4117 + }
4118 +
4119 + if (is_realtime(target)) {
4120 + /* The task is already a real-time task.
4121 + * We cannot allow parameter changes at this point.
4122 + */
4123 + retval = -EBUSY;
4124 + goto out_unlock;
4125 + }
4126 + if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
4127 + printk(KERN_INFO "litmus: real-time task %d rejected "
4128 + "because of invalid criticality level\n", pid);
4129 + goto out_unlock;
4130 + }
4131 +
4132 + mp->init_finished = 0;
4133 + //target->rt_param.plugin_state = mp;
4134 + target->rt_param.mc2_data = mp;
4135 +
4136 + retval = 0;
4137 +
4138 +out_unlock:
4139 + read_unlock_irq(&tasklist_lock);
4140 +out:
+ if (retval)
+ kfree(mp); /* avoid leaking mp on the error paths */
4141 + return retval;
4142 +}
4143 \ No newline at end of file
4144 diff --git litmus/polling_reservations.c litmus/polling_reservations.c
4145 new file mode 100644
4146 index 0000000..c6e10eb
4147 --- /dev/null
4148 +++ litmus/polling_reservations.c
4149 @@ -0,0 +1,566 @@
4150 +#include <linux/sched.h>
4151 +
4152 +#include <litmus/litmus.h>
4153 +#include <litmus/reservation.h>
4154 +#include <litmus/polling_reservations.h>
4155 +
4156 +//#define TRACE(fmt, args...) do {} while (false)
4157 +//#define TRACE_TASK(fmt, args...) do {} while (false)
4158 +
4159 +static void periodic_polling_client_arrives(
4160 + struct reservation* res,
4161 + struct reservation_client *client
4162 +)
4163 +{
4164 + struct polling_reservation *pres =
4165 + container_of(res, struct polling_reservation, res);
4166 + lt_t instances, tmp;
4167 +
4168 + list_add_tail(&client->list, &res->clients);
4169 +
4170 + switch (res->state) {
4171 + case RESERVATION_INACTIVE:
4172 + /* Figure out next replenishment time. */
4173 + if (res->env->time_zero == 0) {
4174 + tmp = res->env->current_time - res->env->time_zero;
4175 + instances = div64_u64(tmp, pres->period);
4176 + res->next_replenishment =
4177 + (instances + 1) * pres->period + pres->offset;
4178 + }
4179 + else {
4180 + tmp = res->env->current_time - res->env->time_zero;
4181 + instances = div64_u64(tmp, pres->period);
4182 + res->next_replenishment = res->env->time_zero + instances * pres->period;
4183 + }
4184 +
4185 + TRACE("ENV_TIME_ZERO %llu in mode %d\n", res->env->time_zero, res->mode);
4186 + TRACE("pol-res: mode %d R%d activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
4187 + res->mode, res->id, tmp, instances, pres->period, res->next_replenishment,
4188 + res->env->current_time);
4189 +
4190 + res->env->change_state(res->env, res,
4191 + RESERVATION_DEPLETED);
4192 + break;
4193 +
4194 + case RESERVATION_ACTIVE:
4195 + case RESERVATION_DEPLETED:
4196 + /* do nothing */
4197 + break;
4198 +
4199 + case RESERVATION_ACTIVE_IDLE:
4200 + res->blocked_by_ghost = 0;
4201 + res->env->change_state(res->env, res,
4202 + RESERVATION_ACTIVE);
4203 + break;
4204 + }
4205 +}
4206 +
4207 +
4208 +static void periodic_polling_client_departs(
4209 + struct reservation *res,
4210 + struct reservation_client *client,
4211 + int did_signal_job_completion
4212 +)
4213 +{
4214 + list_del(&client->list);
4215 +
4216 + switch (res->state) {
4217 + case RESERVATION_INACTIVE:
4218 + case RESERVATION_ACTIVE_IDLE:
4219 + BUG(); /* INACTIVE or IDLE <=> no client */
4220 + break;
4221 +
4222 + case RESERVATION_ACTIVE:
4223 + if (list_empty(&res->clients)) {
4224 + res->env->change_state(res->env, res,
4225 +// RESERVATION_ACTIVE_IDLE);
4226 + res->cur_budget ?
4227 + RESERVATION_ACTIVE_IDLE :
4228 + RESERVATION_DEPLETED);
4229 +// did_signal_job_completion ?
4230 +// RESERVATION_DEPLETED :
4231 +// RESERVATION_ACTIVE_IDLE);
4232 + } /* else: nothing to do, more clients ready */
4233 + break;
4234 +
4235 + case RESERVATION_DEPLETED:
4236 + /* do nothing */
4237 + break;
4238 + }
4239 +}
4240 +
4241 +static void periodic_polling_on_replenishment(
4242 + struct reservation *res
4243 +)
4244 +{
4245 + struct polling_reservation *pres =
4246 + container_of(res, struct polling_reservation, res);
4247 +
4248 + /* replenish budget */
4249 + res->cur_budget = pres->max_budget;
4250 + res->next_replenishment += pres->period;
4251 + res->budget_consumed = 0;
4252 +
4253 + TRACE("polling_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
4254 + switch (res->state) {
4255 + case RESERVATION_DEPLETED:
4256 + case RESERVATION_INACTIVE:
4257 + case RESERVATION_ACTIVE_IDLE:
4258 + if (list_empty(&res->clients))
4259 + /* no clients => poll again later */
4260 + res->env->change_state(res->env, res,
4261 + RESERVATION_INACTIVE);
4262 + else
4263 + /* we have clients & budget => ACTIVE */
4264 + res->env->change_state(res->env, res,
4265 + RESERVATION_ACTIVE);
4266 + break;
4267 +
4268 + case RESERVATION_ACTIVE:
4269 + /* Replenished while active => tardy? In any case,
4270 + * go ahead and stay active. */
4271 + break;
4272 + }
4273 +}
4274 +
4275 +static void periodic_polling_on_replenishment_edf(
4276 + struct reservation *res
4277 +)
4278 +{
4279 + struct polling_reservation *pres =
4280 + container_of(res, struct polling_reservation, res);
4281 +
4282 + /* update current priority */
4283 + res->priority = res->next_replenishment + pres->deadline;
4284 +
4285 + /* do common updates */
4286 + periodic_polling_on_replenishment(res);
4287 +}
4288 +
4289 +static void common_drain_budget(
4290 + struct reservation *res,
4291 + lt_t how_much)
4292 +{
4293 + if (how_much >= res->cur_budget)
4294 + res->cur_budget = 0;
4295 + else
4296 + res->cur_budget -= how_much;
4297 +
4298 + res->budget_consumed += how_much;
4299 + res->budget_consumed_total += how_much;
4300 +
4301 + switch (res->state) {
4302 + case RESERVATION_DEPLETED:
4303 + case RESERVATION_INACTIVE:
4304 + //BUG();
4305 + TRACE("!!!!!!!!!!!!!!!STATE ERROR R%d STATE(%d)\n", res->id, res->state);
4306 + break;
4307 +
4308 + case RESERVATION_ACTIVE_IDLE:
4309 + case RESERVATION_ACTIVE:
4310 + if (!res->cur_budget) {
4311 + res->env->change_state(res->env, res,
4312 + RESERVATION_DEPLETED);
4313 + } /* else: stay in current state */
4314 + break;
4315 + }
4316 +}
4317 +
4318 +static struct reservation_ops periodic_polling_ops_fp = {
4319 + .dispatch_client = default_dispatch_client,
4320 + .client_arrives = periodic_polling_client_arrives,
4321 + .client_departs = periodic_polling_client_departs,
4322 + .replenish = periodic_polling_on_replenishment,
4323 + .drain_budget = common_drain_budget,
4324 +};
4325 +
4326 +static struct reservation_ops periodic_polling_ops_edf = {
4327 + .dispatch_client = default_dispatch_client,
4328 + .client_arrives = periodic_polling_client_arrives,
4329 + .client_departs = periodic_polling_client_departs,
4330 + .replenish = periodic_polling_on_replenishment_edf,
4331 + .drain_budget = common_drain_budget,
4332 +};
4333 +
4334 +
4335 +
4336 +
4337 +static void sporadic_polling_client_arrives_fp(
4338 + struct reservation* res,
4339 + struct reservation_client *client
4340 +)
4341 +{
4342 + struct polling_reservation *pres =
4343 + container_of(res, struct polling_reservation, res);
4344 +
4345 + list_add_tail(&client->list, &res->clients);
4346 +
4347 + switch (res->state) {
4348 + case RESERVATION_INACTIVE:
4349 + /* Replenish now. */
4350 + res->cur_budget = pres->max_budget;
4351 + res->next_replenishment =
4352 + res->env->current_time + pres->period;
4353 +
4354 + res->env->change_state(res->env, res,
4355 + RESERVATION_ACTIVE);
4356 + break;
4357 +
4358 + case RESERVATION_ACTIVE:
4359 + case RESERVATION_DEPLETED:
4360 + /* do nothing */
4361 + break;
4362 +
4363 + case RESERVATION_ACTIVE_IDLE:
4364 + res->env->change_state(res->env, res,
4365 + RESERVATION_ACTIVE);
4366 + break;
4367 + }
4368 +}
4369 +
4370 +static void sporadic_polling_client_arrives_edf(
4371 + struct reservation* res,
4372 + struct reservation_client *client
4373 +)
4374 +{
4375 + struct polling_reservation *pres =
4376 + container_of(res, struct polling_reservation, res);
4377 +
4378 + list_add_tail(&client->list, &res->clients);
4379 +
4380 + switch (res->state) {
4381 + case RESERVATION_INACTIVE:
4382 + /* Replenish now. */
4383 + res->cur_budget = pres->max_budget;
4384 + res->next_replenishment =
4385 + res->env->current_time + pres->period;
4386 + res->priority =
4387 + res->env->current_time + pres->deadline;
4388 +
4389 + res->env->change_state(res->env, res,
4390 + RESERVATION_ACTIVE);
4391 + break;
4392 +
4393 + case RESERVATION_ACTIVE:
4394 + case RESERVATION_DEPLETED:
4395 + /* do nothing */
4396 + break;
4397 +
4398 + case RESERVATION_ACTIVE_IDLE:
4399 + res->env->change_state(res->env, res,
4400 + RESERVATION_ACTIVE);
4401 + break;
4402 + }
4403 +}
4404 +
4405 +static struct reservation_ops sporadic_polling_ops_fp = {
4406 + .dispatch_client = default_dispatch_client,
4407 + .client_arrives = sporadic_polling_client_arrives_fp,
4408 + .client_departs = periodic_polling_client_departs,
4409 + .replenish = periodic_polling_on_replenishment,
4410 + .drain_budget = common_drain_budget,
4411 +};
4412 +
4413 +static struct reservation_ops sporadic_polling_ops_edf = {
4414 + .dispatch_client = default_dispatch_client,
4415 + .client_arrives = sporadic_polling_client_arrives_edf,
4416 + .client_departs = periodic_polling_client_departs,
4417 + .replenish = periodic_polling_on_replenishment_edf,
4418 + .drain_budget = common_drain_budget,
4419 +};
4420 +
4421 +void polling_reservation_init(
4422 + struct polling_reservation *pres,
4423 + int use_edf_prio,
4424 + int use_periodic_polling,
4425 + lt_t budget, lt_t period, lt_t deadline, lt_t offset
4426 +)
4427 +{
4428 + if (!deadline)
4429 + deadline = period;
4430 + BUG_ON(budget > period);
4431 + BUG_ON(budget > deadline);
4432 + BUG_ON(offset >= period);
4433 +
4434 + reservation_init(&pres->res);
4435 + pres->max_budget = budget;
4436 + pres->period = period;
4437 + pres->deadline = deadline;
4438 + pres->offset = offset;
4439 + TRACE_TASK(current, "polling_reservation_init: periodic %d, use_edf %d\n", use_periodic_polling, use_edf_prio);
4440 + if (use_periodic_polling) {
4441 + if (use_edf_prio)
4442 + pres->res.ops = &periodic_polling_ops_edf;
4443 + else
4444 + pres->res.ops = &periodic_polling_ops_fp;
4445 + } else {
4446 + if (use_edf_prio)
4447 + pres->res.ops = &sporadic_polling_ops_edf;
4448 + else
4449 + pres->res.ops = &sporadic_polling_ops_fp;
4450 + }
4451 +}
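+
+/* Illustrative sketch: a fixed-priority periodic polling reservation with
+ * a 10 ms budget every 50 ms (passing deadline 0 defaults it to the
+ * period, as handled above):
+ *
+ *   // use_edf_prio = 0 (FP), use_periodic_polling = 1
+ *   struct polling_reservation *pres = kzalloc(sizeof(*pres), GFP_KERNEL);
+ *   if (pres)
+ *       polling_reservation_init(pres, 0, 1,
+ *                                10000000, 50000000, 0, 0);
+ */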
4452 +
4453 +
4454 +static lt_t td_cur_major_cycle_start(struct table_driven_reservation *tdres)
4455 +{
4456 + lt_t x, tmp;
4457 +
4458 + tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
4459 + x = div64_u64(tmp, tdres->major_cycle);
4460 + x *= tdres->major_cycle;
4461 + return x;
4462 +}
4463 +
4464 +
4465 +static lt_t td_next_major_cycle_start(struct table_driven_reservation *tdres)
4466 +{
4467 + lt_t x, tmp;
4468 +
4469 + tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
4470 + x = div64_u64(tmp, tdres->major_cycle) + 1;
4471 + x *= tdres->major_cycle;
4472 + return x;
4473 +}
4474 +
4475 +static void td_client_arrives(
4476 + struct reservation* res,
4477 + struct reservation_client *client
4478 +)
4479 +{
4480 + struct table_driven_reservation *tdres =
4481 + container_of(res, struct table_driven_reservation, res);
4482 +
4483 + list_add_tail(&client->list, &res->clients);
4484 +
4485 + switch (res->state) {
4486 + case RESERVATION_INACTIVE:
4487 + /* Figure out first replenishment time. */
4488 + tdres->major_cycle_start = td_next_major_cycle_start(tdres);
4489 + res->next_replenishment = tdres->major_cycle_start;
4490 + res->next_replenishment += tdres->intervals[0].start;
4491 + tdres->next_interval = 0;
4492 +
4493 + res->env->change_state(res->env, res,
4494 + RESERVATION_DEPLETED);
4495 + break;
4496 +
4497 + case RESERVATION_ACTIVE:
4498 + case RESERVATION_DEPLETED:
4499 + /* do nothing */
4500 + break;
4501 +
4502 + case RESERVATION_ACTIVE_IDLE:
4503 + res->env->change_state(res->env, res,
4504 + RESERVATION_ACTIVE);
4505 + break;
4506 + }
4507 +}
4508 +
4509 +static void td_client_departs(
4510 + struct reservation *res,
4511 + struct reservation_client *client,
4512 + int did_signal_job_completion
4513 +)
4514 +{
4515 + list_del(&client->list);
4516 +
4517 + switch (res->state) {
4518 + case RESERVATION_INACTIVE:
4519 + case RESERVATION_ACTIVE_IDLE:
4520 + //BUG(); /* INACTIVE or IDLE <=> no client */
4521 + break;
4522 +
4523 + case RESERVATION_ACTIVE:
4524 + if (list_empty(&res->clients)) {
4525 + res->env->change_state(res->env, res,
4526 + RESERVATION_ACTIVE_IDLE);
4527 + } /* else: nothing to do, more clients ready */
4528 + break;
4529 +
4530 + case RESERVATION_DEPLETED:
4531 + /* do nothing */
4532 + break;
4533 + }
4534 +}
4535 +
4536 +static lt_t td_time_remaining_until_end(struct table_driven_reservation *tdres)
4537 +{
4538 + lt_t now = tdres->res.env->current_time;
4539 + lt_t end = tdres->cur_interval.end;
4540 + //TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n", tdres->res.id, tdres->cur_interval.start, now, end, tdres->res.state);
4541 + if (now >= end)
4542 + return 0;
4543 + else
4544 + return end - now;
4545 +}
4546 +
4547 +static void td_replenish(
4548 + struct reservation *res)
4549 +{
4550 + struct table_driven_reservation *tdres =
4551 + container_of(res, struct table_driven_reservation, res);
4552 +
4553 + //TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id, res->next_replenishment);
4554 +
4555 + /* figure out current interval */
4556 + tdres->cur_interval.start = tdres->major_cycle_start +
4557 + tdres->intervals[tdres->next_interval].start;
4558 + tdres->cur_interval.end = tdres->major_cycle_start +
4559 + tdres->intervals[tdres->next_interval].end;
4560 +/* TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
4561 + tdres->major_cycle_start,
4562 + tdres->cur_interval.start,
4563 + tdres->cur_interval.end);
4564 +*/
4565 + /* reset budget */
4566 + res->cur_budget = td_time_remaining_until_end(tdres);
4567 + res->budget_consumed = 0;
4568 + //TRACE("td_replenish(%u): %s budget=%llu\n", res->id, res->cur_budget ? "" : "WARNING", res->cur_budget);
4569 +
4570 + /* prepare next slot */
4571 + tdres->next_interval = (tdres->next_interval + 1) % tdres->num_intervals;
4572 + if (!tdres->next_interval)
4573 + /* wrap to next major cycle */
4574 + tdres->major_cycle_start += tdres->major_cycle;
4575 +
4576 + /* determine next time this reservation becomes eligible to execute */
4577 + res->next_replenishment = tdres->major_cycle_start;
4578 + res->next_replenishment += tdres->intervals[tdres->next_interval].start;
4579 + //TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
4580 +
4581 +
4582 + switch (res->state) {
4583 + case RESERVATION_DEPLETED:
4584 + case RESERVATION_ACTIVE:
4585 + case RESERVATION_ACTIVE_IDLE:
4586 + if (list_empty(&res->clients))
4587 + res->env->change_state(res->env, res,
4588 + RESERVATION_ACTIVE_IDLE);
4589 + else
4590 + /* we have clients & budget => ACTIVE */
4591 + res->env->change_state(res->env, res,
4592 + RESERVATION_ACTIVE);
4593 + break;
4594 +
4595 + case RESERVATION_INACTIVE:
4596 + BUG();
4597 + break;
4598 + }
4599 +}
4600 +
4601 +static void td_drain_budget(
4602 + struct reservation *res,
4603 + lt_t how_much)
4604 +{
4605 + struct table_driven_reservation *tdres =
4606 + container_of(res, struct table_driven_reservation, res);
4607 +
4608 + res->budget_consumed += how_much;
4609 + res->budget_consumed_total += how_much;
4610 +
4611 + /* Table-driven scheduling: instead of tracking the budget, we compute
4612 + * how much time is left in this allocation interval. */
4613 +
4614 + /* sanity check: we should never try to drain from future slots */
4615 + //TRACE("TD_DRAIN STATE(%d) [%llu,%llu] %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
4616 + //BUG_ON(tdres->cur_interval.start > res->env->current_time);
4617 + if (tdres->cur_interval.start > res->env->current_time)
4618 + TRACE("TD_DRAIN BUG!!!!!!!!!!\n");
4619 +
4620 + switch (res->state) {
4621 + case RESERVATION_DEPLETED:
4622 + case RESERVATION_INACTIVE:
4623 + //BUG();
4624 + TRACE("TD_DRAIN!!!!!!!!! RES_STATE = %d\n", res->state);
4625 + break;
4626 +
4627 + case RESERVATION_ACTIVE_IDLE:
4628 + case RESERVATION_ACTIVE:
4629 + res->cur_budget = td_time_remaining_until_end(tdres);
4630 + //TRACE("td_drain_budget(%u): drained to budget=%llu\n", res->id, res->cur_budget);
4631 + if (!res->cur_budget) {
4632 + res->env->change_state(res->env, res,
4633 + RESERVATION_DEPLETED);
4634 + } else {
4635 + /* sanity check budget calculation */
4636 + //BUG_ON(res->env->current_time >= tdres->cur_interval.end);
4637 + //BUG_ON(res->env->current_time < tdres->cur_interval.start);
4638 + if (res->env->current_time >= tdres->cur_interval.end)
4639 + printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING1\n");
4640 + if (res->env->current_time < tdres->cur_interval.start)
4641 + printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING2\n");
4642 + }
4643 +
4644 + break;
4645 + }
4646 +}
4647 +
4648 +static struct task_struct* td_dispatch_client(
4649 + struct reservation *res,
4650 + lt_t *for_at_most)
4651 +{
4652 + struct task_struct *t;
4653 + struct table_driven_reservation *tdres =
4654 + container_of(res, struct table_driven_reservation, res);
4655 +
4656 + /* usual logic for selecting a client */
4657 + t = default_dispatch_client(res, for_at_most);
4658 +
4659 + TRACE_TASK(t, "td_dispatch_client(%u): selected, budget=%llu\n",
4660 + res->id, res->cur_budget);
4661 +
4662 + /* check how much budget we have left in this time slot */
4663 + res->cur_budget = td_time_remaining_until_end(tdres);
4664 +
4665 + TRACE_TASK(t, "td_dispatch_client(%u): updated to budget=%llu next=%d\n",
4666 + res->id, res->cur_budget, tdres->next_interval);
4667 +
4668 + if (unlikely(!res->cur_budget)) {
4669 + /* Unlikely case: if we ran out of budget, the user configured
4670 + * a broken scheduling table (overlapping table slots).
4671 + * Not much we can do about this, but we can't dispatch a job
4672 + * now without causing overload. So let's register this reservation
4673 + * as depleted and wait for the next allocation. */
4674 + TRACE("td_dispatch_client(%u): budget unexpectedly depleted "
4675 + "(check scheduling table for unintended overlap)\n",
4676 + res->id);
4677 + res->env->change_state(res->env, res,
4678 + RESERVATION_DEPLETED);
4679 + return NULL;
4680 + } else
4681 + return t;
4682 +}
4683 +
4684 +static struct reservation_ops td_ops = {
4685 + .dispatch_client = td_dispatch_client,
4686 + .client_arrives = td_client_arrives,
4687 + .client_departs = td_client_departs,
4688 + .replenish = td_replenish,
4689 + .drain_budget = td_drain_budget,
4690 +};
4691 +
4692 +void table_driven_reservation_init(
4693 + struct table_driven_reservation *tdres,
4694 + lt_t major_cycle,
4695 + struct lt_interval *intervals,
4696 + unsigned int num_intervals)
4697 +{
4698 + unsigned int i;
4699 +
4700 + /* sanity checking */
4701 + BUG_ON(!num_intervals);
4702 + for (i = 0; i < num_intervals; i++)
4703 + BUG_ON(intervals[i].end <= intervals[i].start);
4704 + for (i = 0; i + 1 < num_intervals; i++)
4705 + BUG_ON(intervals[i + 1].start <= intervals[i].end);
4706 + BUG_ON(intervals[num_intervals - 1].end > major_cycle);
4707 +
4708 + reservation_init(&tdres->res);
4709 + tdres->major_cycle = major_cycle;
4710 + tdres->intervals = intervals;
4711 + tdres->cur_interval.start = 0;
4712 + tdres->cur_interval.end = 0;
4713 + tdres->num_intervals = num_intervals;
4714 + tdres->res.ops = &td_ops;
4715 +}
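+
+/* Illustrative sketch: a table-driven reservation owning two slots,
+ * [0 ms, 10 ms) and [30 ms, 40 ms), of a 100 ms major cycle. The intervals
+ * must be non-overlapping, increasing, and contained in the major cycle,
+ * as the BUG_ONs above enforce:
+ *
+ *   static struct lt_interval slots[] = {
+ *       { .start = 0,        .end = 10000000 },
+ *       { .start = 30000000, .end = 40000000 },
+ *   };
+ *   static struct table_driven_reservation tdres;
+ *   table_driven_reservation_init(&tdres, 100000000, slots, 2);
+ */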
4716 diff --git litmus/reservation.c litmus/reservation.c
4717 new file mode 100644
4718 index 0000000..e3381d6
4719 --- /dev/null
4720 +++ litmus/reservation.c
4721 @@ -0,0 +1,710 @@
4722 +#include <linux/sched.h>
4723 +#include <linux/slab.h>
4724 +
4725 +#include <litmus/litmus.h>
4726 +#include <litmus/reservation.h>
4727 +
4728 +//#define TRACE(fmt, args...) do {} while (false)
4729 +//#define TRACE_TASK(fmt, args...) do {} while (false)
4730 +
4731 +#define BUDGET_ENFORCEMENT_AT_C 0
4732 +
4733 +void reservation_init(struct reservation *res)
4734 +{
4735 + memset(res, 0, sizeof(*res));
4736 + res->state = RESERVATION_INACTIVE;
4737 + INIT_LIST_HEAD(&res->clients);
4738 +}
4739 +
4740 +struct task_struct* default_dispatch_client(
4741 + struct reservation *res,
4742 + lt_t *for_at_most)
4743 +{
4744 + struct reservation_client *client, *next;
4745 + struct task_struct* tsk;
4746 +
4747 + BUG_ON(res->state != RESERVATION_ACTIVE);
4748 + *for_at_most = 0;
4749 +
4750 + list_for_each_entry_safe(client, next, &res->clients, list) {
4751 + tsk = client->dispatch(client);
4752 + if (likely(tsk)) {
4753 + return tsk;
4754 + }
4755 + }
4756 + return NULL;
4757 +}
4758 +
4759 +static struct task_struct * task_client_dispatch(struct reservation_client *client)
4760 +{
4761 + struct task_client *tc = container_of(client, struct task_client, client);
4762 + return tc->task;
4763 +}
4764 +
4765 +void task_client_init(struct task_client *tc, struct task_struct *tsk,
4766 + struct reservation *res)
4767 +{
4768 + memset(&tc->client, 0, sizeof(tc->client));
4769 + tc->client.dispatch = task_client_dispatch;
4770 + tc->client.reservation = res;
4771 + tc->task = tsk;
4772 +}
4773 +
4774 +static void sup_scheduler_update_at(
4775 + struct sup_reservation_environment* sup_env,
4776 + lt_t when)
4777 +{
4778 + //TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
4779 + if (sup_env->next_scheduler_update > when)
4780 + sup_env->next_scheduler_update = when;
4781 +}
4782 +
4783 +void sup_scheduler_update_after(
4784 + struct sup_reservation_environment* sup_env,
4785 + lt_t timeout)
4786 +{
4787 + sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
4788 +}
4789 +
4790 +static int _sup_queue_depleted(
4791 + struct sup_reservation_environment* sup_env,
4792 + struct reservation *res)
4793 +{
4794 + struct list_head *pos;
4795 + struct reservation *queued;
4796 + int passed_earlier = 0;
4797 +
4798 + list_for_each(pos, &sup_env->depleted_reservations) {
4799 + queued = list_entry(pos, struct reservation, list);
4800 + if (queued->next_replenishment > res->next_replenishment) {
4801 + list_add(&res->list, pos->prev);
4802 + return passed_earlier;
4803 + } else
4804 + passed_earlier = 1;
4805 + }
4806 +
4807 + list_add_tail(&res->list, &sup_env->depleted_reservations);
4808 +
4809 + return passed_earlier;
4810 +}
4811 +
4812 +static void sup_queue_depleted(
4813 + struct sup_reservation_environment* sup_env,
4814 + struct reservation *res)
4815 +{
4816 + int passed_earlier = _sup_queue_depleted(sup_env, res);
4817 +
4818 + /* check for updated replenishment time */
4819 + if (!passed_earlier)
4820 + sup_scheduler_update_at(sup_env, res->next_replenishment);
4821 +}
4822 +
4823 +static int _sup_queue_active(
4824 + struct sup_reservation_environment* sup_env,
4825 + struct reservation *res)
4826 +{
4827 + struct list_head *pos;
4828 + struct reservation *queued;
4829 + int passed_active = 0;
4830 +
4831 + list_for_each(pos, &sup_env->active_reservations) {
4832 + queued = list_entry(pos, struct reservation, list);
4833 + if (queued->priority > res->priority) {
4834 + list_add(&res->list, pos->prev);
4835 + return passed_active;
4836 + } else if (queued->state == RESERVATION_ACTIVE)
4837 + passed_active = 1;
4838 + }
4839 +
4840 + list_add_tail(&res->list, &sup_env->active_reservations);
4841 + return passed_active;
4842 +}
4843 +
4844 +static void sup_queue_active(
4845 + struct sup_reservation_environment* sup_env,
4846 + struct reservation *res)
4847 +{
4848 + int passed_active = _sup_queue_active(sup_env, res);
4849 +
4850 + /* check for possible preemption */
4851 + if (res->state == RESERVATION_ACTIVE && !passed_active)
4852 + sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
4853 + else {
4854 + /* Active means this reservation is draining budget => make sure
4855 + * the scheduler is called to notice when the reservation budget has been
4856 + * drained completely. */
4857 + sup_scheduler_update_after(sup_env, res->cur_budget);
4858 + }
4859 +}
4860 +
4861 +static void sup_queue_reservation(
4862 + struct sup_reservation_environment* sup_env,
4863 + struct reservation *res)
4864 +{
4865 + switch (res->state) {
4866 + case RESERVATION_INACTIVE:
4867 + list_add(&res->list, &sup_env->inactive_reservations);
4868 + break;
4869 +
4870 + case RESERVATION_DEPLETED:
4871 + sup_queue_depleted(sup_env, res);
4872 + break;
4873 +
4874 + case RESERVATION_ACTIVE_IDLE:
4875 + case RESERVATION_ACTIVE:
4876 + sup_queue_active(sup_env, res);
4877 + break;
4878 + }
4879 +}
4880 +
4881 +void sup_add_new_reservation(
4882 + struct sup_reservation_environment* sup_env,
4883 + struct reservation* new_res)
4884 +{
4885 + new_res->env = &sup_env->env;
4886 + sup_queue_reservation(sup_env, new_res);
4887 +}
4888 +
4889 +struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
4890 + unsigned int id)
4891 +{
4892 + struct reservation *res;
4893 + BUG_ON(!sup_env);
4894 + list_for_each_entry(res, &sup_env->active_reservations, list) {
4895 + if (res->id == id)
4896 + return res;
4897 + }
4898 + list_for_each_entry(res, &sup_env->inactive_reservations, list) {
4899 + if (res->id == id)
4900 + return res;
4901 + }
4902 + list_for_each_entry(res, &sup_env->depleted_reservations, list) {
4903 + if (res->id == id)
4904 + return res;
4905 + }
4906 +
4907 + return NULL;
4908 +}
4909 +
4910 +static void sup_charge_budget(
4911 + struct sup_reservation_environment* sup_env,
4912 + lt_t delta)
4913 +{
4914 + struct list_head *pos, *next;
4915 + struct reservation *res;
4916 +
4917 + int encountered_active = 0;
4918 +
4919 + list_for_each_safe(pos, next, &sup_env->active_reservations) {
4920 + /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
4921 + res = list_entry(pos, struct reservation, list);
4922 + if (res->state == RESERVATION_ACTIVE) {
4923 + //TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
4924 + if (encountered_active == 0 && res->blocked_by_ghost == 0) {
4925 + //TRACE("DRAIN !!\n");
4926 + res->ops->drain_budget(res, delta);
4927 + encountered_active = 1;
4928 + }
4929 + } else {
4930 + //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
4931 + TRACE("sup_charge_budget ACTIVE_IDLE R%u drain %llu\n", res->id, delta);
4932 + res->ops->drain_budget(res, delta);
4933 + }
4934 + if (res->state == RESERVATION_ACTIVE ||
4935 + res->state == RESERVATION_ACTIVE_IDLE)
4936 + {
4937 + /* make sure scheduler is invoked when this reservation expires
4938 + * its remaining budget */
4939 + //TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
4940 + sup_scheduler_update_after(sup_env, res->cur_budget);
4941 + }
4942 + //if (encountered_active == 2)
4943 + /* stop at the first ACTIVE reservation */
4944 + // break;
4945 + }
4946 + //TRACE("finished charging budgets\n");
4947 +}
4948 +
4949 +static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
4950 +{
4951 + struct list_head *pos, *next;
4952 + struct reservation *res;
4953 +
4954 + list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
4955 + res = list_entry(pos, struct reservation, list);
4956 + if (res->next_replenishment <= sup_env->env.current_time) {
4957 + res->ops->replenish(res);
4958 + } else {
4959 + /* list is ordered by increasing depletion times */
4960 + break;
4961 + }
4962 + }
4963 + //TRACE("finished replenishing budgets\n");
4964 +
4965 + /* request a scheduler update at the next replenishment instant */
4966 + res = list_first_entry_or_null(&sup_env->depleted_reservations,
4967 + struct reservation, list);
4968 + if (res)
4969 + sup_scheduler_update_at(sup_env, res->next_replenishment);
4970 +}
4971 +
4972 +void sup_update_time(
4973 + struct sup_reservation_environment* sup_env,
4974 + lt_t now)
4975 +{
4976 + lt_t delta;
4977 +
4978 + /* If the time didn't advance, there is nothing to do.
4979 + * This check makes it safe to call sup_update_time() potentially
4980 + * multiple times (e.g., via different code paths). */
4981 + //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
4982 + if (unlikely(now <= sup_env->env.current_time))
4983 + return;
4984 +
4985 + delta = now - sup_env->env.current_time;
4986 + sup_env->env.current_time = now;
4987 +
4988 + /* check if future updates are required */
4989 + if (sup_env->next_scheduler_update <= sup_env->env.current_time)
4990 + sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
4991 +
4992 + /* deplete budgets by passage of time */
4993 + //TRACE("CHARGE###\n");
4994 + sup_charge_budget(sup_env, delta);
4995 +
4996 + /* check if any budgets where replenished */
4997 + //TRACE("REPLENISH###\n");
4998 + sup_replenish_budgets(sup_env);
4999 +}
5000 +
5001 +struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
5002 +{
5003 + struct reservation *res, *next;
5004 + struct task_struct *tsk = NULL;
5005 + lt_t time_slice;
5006 +
5007 + list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
5008 + if (res->state == RESERVATION_ACTIVE) {
5009 + tsk = res->ops->dispatch_client(res, &time_slice);
5010 + if (likely(tsk)) {
5011 + if (time_slice)
5012 + sup_scheduler_update_after(sup_env, time_slice);
5013 + sup_scheduler_update_after(sup_env, res->cur_budget);
5014 + return tsk;
5015 + }
5016 + }
5017 + }
5018 +
5019 + return NULL;
5020 +}
5021 +
5022 +static void sup_res_change_state(
5023 + struct reservation_environment* env,
5024 + struct reservation *res,
5025 + reservation_state_t new_state)
5026 +{
5027 + struct sup_reservation_environment* sup_env;
5028 +
5029 + sup_env = container_of(env, struct sup_reservation_environment, env);
5030 +
5031 + TRACE("reservation R%d state %d->%d at %llu\n",
5032 + res->id, res->state, new_state, env->current_time);
5033 +
5034 + list_del(&res->list);
5035 + /* check if we need to reschedule because we lost an active reservation */
5036 + if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
5037 + sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
5038 + res->state = new_state;
5039 + sup_queue_reservation(sup_env, res);
5040 +}
5041 +
5042 +void sup_init(struct sup_reservation_environment* sup_env)
5043 +{
5044 + memset(sup_env, 0, sizeof(*sup_env));
5045 +
5046 + INIT_LIST_HEAD(&sup_env->active_reservations);
5047 + INIT_LIST_HEAD(&sup_env->depleted_reservations);
5048 + INIT_LIST_HEAD(&sup_env->inactive_reservations);
5049 +
5050 + sup_env->env.change_state = sup_res_change_state;
5051 +
5052 + sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
5053 +}
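+
+/* Illustrative sketch: a partitioned (per-CPU) user of this environment
+ * follows the same pattern the MC^2 plugin uses later in this patch:
+ *
+ *   sup_init(sup_env);
+ *   sup_add_new_reservation(sup_env, &pres->res);  // e.g., a polling res.
+ *   ...
+ *   sup_update_time(sup_env, litmus_clock());  // charge + replenish budgets
+ *   tsk = sup_dispatch(sup_env);  // first ACTIVE reservation with a client
+ */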
5054 +
5055 +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
5056 + unsigned int id)
5057 +{
5058 + struct reservation *res;
5059 +
5060 + list_for_each_entry(res, &gmp_env->active_reservations, list) {
5061 + if (res->id == id)
5062 + return res;
5063 + }
5064 + list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
5065 + if (res->id == id)
5066 + return res;
5067 + }
5068 + list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
5069 + if (res->id == id)
5070 + return res;
5071 + }
5072 +
5073 + return NULL;
5074 +}
5075 +
5076 +
5077 +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
5078 + unsigned int id)
5079 +{
5080 + struct next_timer_event *event;
5081 +
5082 + list_for_each_entry(event, &gmp_env->next_events, list) {
5083 + if (event->id == id)
5084 + return event;
5085 + }
5086 +
5087 + return NULL;
5088 +}
5089 +
5090 +
5091 +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
5092 + lt_t when)
5093 +{
5094 + struct next_timer_event *event;
5095 +
5096 + list_for_each_entry(event, &gmp_env->next_events, list) {
5097 + if (event->next_update == when)
5098 + return event;
5099 + }
5100 +
5101 + return NULL;
5102 +}
5103 +
5104 +#define TIMER_RESOLUTION 100000L
5105 +
5106 +static void gmp_add_event(
5107 + struct gmp_reservation_environment* gmp_env,
5108 + lt_t when, unsigned int id, event_type_t type)
5109 +{
5110 + struct next_timer_event *nevent, *queued;
5111 + struct list_head *pos;
5112 + int found = 0, update = 0;
5113 +
5114 + //when = div64_u64(when, TIMER_RESOLUTION);
5115 + //when *= TIMER_RESOLUTION;
5116 +//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
5117 + nevent = gmp_find_event_by_id(gmp_env, id);
5118 +
5119 + if (nevent)
5120 + TRACE("EVENT R%d update prev = %llu, new = %llu\n", nevent->id, nevent->next_update, when);
5121 +
5122 + if (nevent && nevent->next_update > when) {
5123 + list_del(&nevent->list);
5124 + update = 1;
5125 +
5126 + }
5127 +
5128 + if (!nevent || nevent->type != type || update == 1) {
5129 + if (update == 0)
5130 + nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
5131 + BUG_ON(!nevent);
5132 + nevent->next_update = when;
5133 + nevent->id = id;
5134 + nevent->type = type;
5135 + nevent->timer_armed_on = NO_CPU;
5136 +
5137 + list_for_each(pos, &gmp_env->next_events) {
5138 + queued = list_entry(pos, struct next_timer_event, list);
5139 + if (queued->next_update > nevent->next_update) {
5140 + list_add(&nevent->list, pos->prev);
5141 + found = 1;
5142 + TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
5143 + break;
5144 + }
5145 + }
5146 +
5147 + if (!found) {
5148 + list_add_tail(&nevent->list, &gmp_env->next_events);
5149 + TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
5150 + }
5151 + } else {
5152 + //TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
5153 +; //printk(KERN_ALERT "EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
5154 + }
5155 +
5156 + TRACE("======START PRINTING EVENT LIST======\n");
5157 + gmp_print_events(gmp_env, litmus_clock());
5158 + TRACE("======FINISH PRINTING EVENT LIST======\n");
5159 +}
5160 +
5161 +void gmp_add_event_after(
5162 + struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
5163 +{
5164 + //printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
5165 + gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
5166 +}
5167 +
5168 +static void gmp_queue_depleted(
5169 + struct gmp_reservation_environment* gmp_env,
5170 + struct reservation *res)
5171 +{
5172 + struct list_head *pos;
5173 + struct reservation *queued;
5174 + int found = 0;
5175 +
5176 +//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
5177 +
5178 + list_for_each(pos, &gmp_env->depleted_reservations) {
5179 + queued = list_entry(pos, struct reservation, list);
5180 + if (queued && (queued->next_replenishment > res->next_replenishment)) {
5181 +//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
5182 + list_add(&res->list, pos->prev);
5183 + found = 1;
5184 + break;
5185 + }
5186 + }
5187 +
5188 + if (!found)
5189 + list_add_tail(&res->list, &gmp_env->depleted_reservations);
5190 +
5191 + TRACE("R%d queued to depleted_list\n", res->id);
5192 +//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
5193 + gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
5194 +}
5195 +
5196 +static void gmp_queue_active(
5197 + struct gmp_reservation_environment* gmp_env,
5198 + struct reservation *res)
5199 +{
5200 + struct list_head *pos;
5201 + struct reservation *queued;
5202 + int check_preempt = 1, found = 0;
5203 +
5204 + TRACE("R%d has been queued on active\n", res->id);
5205 +
5206 + list_for_each(pos, &gmp_env->active_reservations) {
5207 + queued = list_entry(pos, struct reservation, list);
5208 + if (queued->priority > res->priority) {
5209 + list_add(&res->list, pos->prev);
5210 + found = 1;
5211 + break;
5212 + } else if (queued->scheduled_on == NO_CPU)
5213 + check_preempt = 0;
5214 + }
5215 +
5216 + if (!found)
5217 + list_add_tail(&res->list, &gmp_env->active_reservations);
5218 +
5219 + /* check for possible preemption */
5220 + if (res->state == RESERVATION_ACTIVE && check_preempt)
5221 + gmp_env->schedule_now++;
5222 +
5223 +#if BUDGET_ENFORCEMENT_AT_C
5224 + gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
5225 +#endif
5226 + res->event_added = 1;
5227 +}
5228 +
5229 +static void gmp_queue_reservation(
5230 + struct gmp_reservation_environment* gmp_env,
5231 + struct reservation *res)
5232 +{
5233 +
5234 +//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
5235 + switch (res->state) {
5236 + case RESERVATION_INACTIVE:
5237 + list_add(&res->list, &gmp_env->inactive_reservations);
5238 + break;
5239 +
5240 + case RESERVATION_DEPLETED:
5241 + gmp_queue_depleted(gmp_env, res);
5242 + break;
5243 +
5244 + case RESERVATION_ACTIVE_IDLE:
5245 + case RESERVATION_ACTIVE:
5246 + gmp_queue_active(gmp_env, res);
5247 + break;
5248 + }
5249 +}
5250 +
5251 +void gmp_add_new_reservation(
5252 + struct gmp_reservation_environment* gmp_env,
5253 + struct reservation* new_res)
5254 +{
5255 + new_res->env = &gmp_env->env;
5256 + gmp_queue_reservation(gmp_env, new_res);
5257 +}
5258 +
5259 +#if BUDGET_ENFORCEMENT_AT_C
5260 +static void gmp_charge_budget(
5261 + struct gmp_reservation_environment* gmp_env,
5262 + lt_t delta)
5263 +{
5264 + struct list_head *pos, *next;
5265 + struct reservation *res;
5266 +
5267 + list_for_each_safe(pos, next, &gmp_env->active_reservations) {
5268 + int drained = 0;
5269 + /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
5270 + res = list_entry(pos, struct reservation, list);
5271 + if (res->state == RESERVATION_ACTIVE) {
5272 + TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
5273 + if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
5274 + TRACE("DRAIN !!\n");
5275 + drained = 1;
5276 + res->ops->drain_budget(res, delta);
5277 + } else {
5278 + TRACE("NO DRAIN (not scheduled)!!\n");
5279 + }
5280 + } else {
5281 + //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
5282 + if (res->state != RESERVATION_ACTIVE_IDLE)
5283 + TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
5284 + TRACE("gmp_charge_budget ACTIVE_IDLE R%u drain %llu\n", res->id, delta);
5285 + //if (res->is_ghost != NO_CPU) {
5286 + TRACE("DRAIN !!\n");
5287 + drained = 1;
5288 + res->ops->drain_budget(res, delta);
5289 + //}
5290 + }
5291 + if ((res->state == RESERVATION_ACTIVE ||
5292 + res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
5293 + {
5294 + /* make sure scheduler is invoked when this reservation expires
5295 + * its remaining budget */
5296 + TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
5297 + gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
5298 + res->event_added = 1;
5299 + }
5300 + //if (encountered_active == 2)
5301 + /* stop at the first ACTIVE reservation */
5302 + // break;
5303 + }
5304 + //TRACE("finished charging budgets\n");
5305 +}
5306 +#else
5307 +
5308 +static void gmp_charge_budget(
5309 + struct gmp_reservation_environment* gmp_env,
5310 + lt_t delta)
5311 +{
5312 + return;
5313 +}
5314 +
5315 +#endif
5316 +
5317 +static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
5318 +{
5319 + struct list_head *pos, *next;
5320 + struct reservation *res;
5321 +
5322 + list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
5323 + res = list_entry(pos, struct reservation, list);
5324 + if (res->next_replenishment <= gmp_env->env.current_time) {
5325 + res->ops->replenish(res);
5326 + if (res->is_ghost != NO_CPU) {
5327 + TRACE("R%d replenished! scheduled_on=%d\n", res->id, res->scheduled_on);
5328 + }
5329 + } else {
5330 + /* list is ordered by increasing depletion times */
5331 + break;
5332 + }
5333 + }
5334 + //TRACE("finished replenishing budgets\n");
5335 +}
5336 +
5337 +#define EPSILON 50 /* in ns; smaller advances count as "time did not advance" */
5338 +
5339 +/* return schedule_now */
5340 +int gmp_update_time(
5341 + struct gmp_reservation_environment* gmp_env,
5342 + lt_t now)
5343 +{
5344 + struct next_timer_event *event, *next;
5345 + lt_t delta;
+ int ret;
5346 +
5347 + /* If the time didn't advance, there is nothing to do.
5348 + * This check makes it safe to call gmp_update_time() potentially
5349 + * multiple times (e.g., via different code paths). */
5350 + //TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
5351 + if (unlikely(now <= gmp_env->env.current_time + EPSILON))
5352 + return 0;
5353 +
5354 + delta = now - gmp_env->env.current_time;
5355 + gmp_env->env.current_time = now;
5356 +
5357 +
5358 + //gmp_print_events(gmp_env, now);
5359 + /* deplete budgets by passage of time */
5360 + //TRACE("CHARGE###\n");
5361 + gmp_charge_budget(gmp_env, delta);
5362 +
5363 + /* check if any budgets where replenished */
5364 + //TRACE("REPLENISH###\n");
5365 + gmp_replenish_budgets(gmp_env);
5366 +
5367 +
5368 + list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
5369 + if (event->next_update < now) {
5370 + list_del(&event->list);
5371 + //TRACE("EVENT at %llu IS DELETED\n", event->next_update);
5372 + kfree(event);
5373 + } else {
5374 + break;
5375 + }
5376 + }
5377 +
5378 + //gmp_print_events(gmp_env, litmus_clock());
5379 +
5380 + ret = min(gmp_env->schedule_now, NR_CPUS);
5381 + gmp_env->schedule_now = 0;
5382 +
5383 + return ret;
5384 +}
5385 +
5386 +void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
5387 +{
5388 + struct next_timer_event *event, *next;
5389 +
5390 + TRACE("GLOBAL EVENTS now=%llu\n", now);
5391 + list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
5392 + TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
5393 + }
5394 +}
5395 +
5396 +static void gmp_res_change_state(
5397 + struct reservation_environment* env,
5398 + struct reservation *res,
5399 + reservation_state_t new_state)
5400 +{
5401 + struct gmp_reservation_environment* gmp_env;
5402 +
5403 + gmp_env = container_of(env, struct gmp_reservation_environment, env);
5404 +
5405 + TRACE("GMP reservation R%d state %d->%d at %llu\n",
5406 + res->id, res->state, new_state, env->current_time);
5407 +
5408 + list_del(&res->list);
5409 + /* check if we need to reschedule because we lost an active reservation */
5410 + if (res->state == RESERVATION_ACTIVE)
5411 + gmp_env->schedule_now++;
5412 + res->state = new_state;
5413 + gmp_queue_reservation(gmp_env, res);
5414 +}
5415 +
5416 +void gmp_init(struct gmp_reservation_environment* gmp_env)
5417 +{
5418 + memset(gmp_env, 0, sizeof(*gmp_env));
5419 +
5420 + INIT_LIST_HEAD(&gmp_env->active_reservations);
5421 + INIT_LIST_HEAD(&gmp_env->depleted_reservations);
5422 + INIT_LIST_HEAD(&gmp_env->inactive_reservations);
5423 + INIT_LIST_HEAD(&gmp_env->next_events);
5424 +
5425 + gmp_env->env.change_state = gmp_res_change_state;
5426 +
5427 + gmp_env->schedule_now = 0;
5428 + gmp_env->will_schedule = false;
5429 +
5430 + //raw_spin_lock_init(&gmp_env->lock);
5431 +}
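+
+/* Illustrative sketch: the level-C (global) path in sched_mc2.c drives this
+ * environment under global_lock; gmp_update_time() returns how many CPUs
+ * should reschedule, capped at NR_CPUS:
+ *
+ *   raw_spin_lock(&global_lock);
+ *   schedule_now = gmp_update_time(_global_env, litmus_clock());
+ *   raw_spin_unlock(&global_lock);
+ *   // then preempt the schedule_now lowest-priority CPUs
+ */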
5432 diff --git litmus/sched_mc2.c litmus/sched_mc2.c
5433 new file mode 100644
5434 index 0000000..d7cf3fb
5435 --- /dev/null
5436 +++ litmus/sched_mc2.c
5437 @@ -0,0 +1,2150 @@
5438 +/*
5439 + * litmus/sched_mc2.c
5440 + *
5441 + * Implementation of the Mixed-Criticality on MultiCore scheduler
5442 + *
5443 + * This plugin implements the scheduling algorithm proposed in the paper
5444 + * "Mixed-Criticality Real-Time Scheduling for Multicore Systems".
5445 + */
5446 +
5447 +#include <linux/percpu.h>
5448 +#include <linux/slab.h>
5449 +#include <linux/rwlock.h>
5450 +#include <asm/uaccess.h>
5451 +#include <linux/delay.h>
5452 +
5453 +#include <litmus/sched_plugin.h>
5454 +#include <litmus/preempt.h>
5455 +#include <litmus/debug_trace.h>
5456 +
5457 +#include <litmus/litmus.h>
5458 +#include <litmus/jobs.h>
5459 +#include <litmus/budget.h>
5460 +#include <litmus/litmus_proc.h>
5461 +#include <litmus/sched_trace.h>
5462 +#include <litmus/cache_proc.h>
5463 +#include <litmus/trace.h>
5464 +
5465 +#include <litmus/mc2_common.h>
5466 +#include <litmus/reservation.h>
5467 +#include <litmus/polling_reservations.h>
5468 +
5469 +#define BUDGET_ENFORCEMENT_AT_C 0
5470 +
5471 +extern atomic_t num_sync_released;
5472 +extern void do_partition(enum crit_level lv, int cpu);
5473 +
5474 +/* _global_env - reservation container for level-C tasks */
5475 +struct gmp_reservation_environment _global_env_modes[NR_MODES];
5476 +struct gmp_reservation_environment *_global_env;
5477 +raw_spinlock_t global_lock;
5478 +
5479 +/* cpu_entry - keep track of a running task on a cpu
5480 + * This state is used to decide the lowest priority cpu
5481 + */
5482 +struct cpu_entry {
5483 + struct task_struct *scheduled;
5484 + lt_t deadline;
5485 + int cpu;
5486 + enum crit_level lv;
5487 + /* if will_schedule is true, this cpu is already selected and
5488 + mc2_schedule() will be executed soon. */
5489 + bool will_schedule;
5490 +};
5491 +
5492 +/* cpu_priority - a global state for choosing the lowest priority CPU */
5493 +struct cpu_priority {
5494 + raw_spinlock_t lock;
5495 + struct cpu_entry cpu_entries[NR_CPUS];
5496 +};
5497 +
5498 +struct cpu_priority _lowest_prio_cpu;
5499 +
5500 +/* mc2_task_state - a task state structure */
5501 +struct mc2_task_state {
5502 + /* A task can be shared by multiple modes */
5503 + struct task_client res_info[NR_MODES];
5504 + /* if cpu == -1, this task is a global task (level C) */
5505 + int cpu;
5506 + bool has_departed;
5507 + struct mc2_task mc2_param;
5508 +};
5509 +
5510 +/* mc2_cpu_state - maintain the scheduled state and ghost jobs
5511 + * timer : timer for partitioned tasks (level A and B)
5512 + */
5513 +struct mc2_cpu_state {
5514 + raw_spinlock_t lock;
5515 +
5516 + struct sup_reservation_environment sup_env_modes[NR_MODES];
5517 + struct sup_reservation_environment *sup_env;
5518 + struct hrtimer timer;
5519 +
5520 + int cpu;
5521 + struct task_struct* scheduled;
5522 +};
5523 +
5524 +static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state);
5525 +
5526 +static int resched_cpu[NR_CPUS];
5527 +static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
5528 +
5529 +#define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id))
5530 +#define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state))
5531 +
5532 +unsigned int mode; //currently executing mode, from 0 to NR_MODES-1
5533 +unsigned int requested_mode; //The pending mode
5534 +/* Prevents multiple mode-change requests from entering concurrently, and
5535 + * prevents a new request from entering while an old one is being enacted */
5536 +raw_spinlock_t mode_lock;
5537 +
5538 +unsigned int mode_sizes[NR_MODES];
5539 +unsigned int res_reported;
5540 +bool cpu_0_spin_flag;
5541 +bool seen_once;
5542 +bool cpu_0_task_exist;
5543 +bool mode_changed;
5544 +bool mode_poll_exited;
5545 +static DEFINE_PER_CPU(unsigned long, mode_counter);
5546 +
5547 +/* Mode change macros */
5548 +#define local_mode_counter() (this_cpu_ptr(&mode_counter))
5549 +#define cpu_0_mode_counter() (&per_cpu(mode_counter, 0))
5550 +#define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum))
5551 +#define pending (mode != requested_mode)
5552 +#define ready (!res_reported)
5553 +
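+/*
+ * Mode-change protocol (overview of the code below):
+ * - a change is "pending" while mode != requested_mode;
+ * - every level-C reservation of the old mode must report once
+ *   (res->reported); when res_reported reaches 0 the change is "ready";
+ * - the per-cpu mode_counter acts as a barrier: CPU 0 enacts a pending
+ *   change only after all other CPUs have entered sys_enact_mode()
+ *   for the current hyperperiod.
+ */
+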
5554 +/*
5555 + * To be called from a level-A task whose period equals the
5556 + * hyperperiod of levels A and B
5557 + */
5558 +
5559 +asmlinkage long sys_enact_mode(void)
5560 +{
5561 + struct mc2_cpu_state *state = local_cpu_state();
5562 + struct reservation *res;
5563 + struct list_head *pos;
5564 + unsigned long flags;
5565 + if (state->cpu == 0 && !mode_poll_exited){
5566 + unsigned long *other_cpu_counter;
5567 + unsigned long cpu0_val = this_cpu_read(mode_counter);
5568 + int i;
5569 + for(i = 1; i < NR_CPUS; i++){
5570 + other_cpu_counter = &per_cpu(mode_counter, i);
5571 + while(cpu0_val == *other_cpu_counter && !mode_poll_exited){
5572 + udelay(1);
5573 + }
5574 + }
5575 + mode_changed = false;
5576 + if (pending){ //MCR has entered
5577 + raw_spin_lock_irqsave(&state->lock, flags);
5578 + raw_spin_lock(&global_lock);
5579 + raw_spin_lock(&mode_lock);
5580 +
5581 + if (!seen_once){
5582 + TRACE_TASK(current, "REQUEST\n");
5583 + sched_trace_request_mode(current);
5584 +				//clean up jobs that are already done;
5585 +				//after this, jobs report themselves
5586 + list_for_each(pos, &_global_env->active_reservations){
5587 + res = list_entry(pos, struct reservation, list);
5588 + if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
5589 + res->reported = 1;
5590 + TRACE_TASK(res->tsk,"R%d RES_REPORTED_ACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
5591 + res_reported--;
5592 + }
5593 + }
5594 + list_for_each(pos, &_global_env->depleted_reservations){
5595 + res = list_entry(pos, struct reservation, list);
5596 + if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
5597 + res->reported = 1;
5598 + TRACE_TASK(res->tsk,"R%d RES_REPORTED_DEPLETED = %d mode %d\n",res->id, res_reported, res->mode);
5599 + res_reported--;
5600 + }
5601 +
5602 + }
5603 + list_for_each(pos, &_global_env->inactive_reservations){
5604 + res = list_entry(pos, struct reservation, list);
5605 + if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
5606 + res->reported = 1;
5607 + TRACE_TASK(res->tsk,"R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
5608 + res_reported--;
5609 + }
5610 + }
5611 + seen_once = true;
5612 + }
5613 + if (seen_once && !(ready)){
5614 + TRACE("Mode change waiting on tasks\n");
5615 + list_for_each(pos, &_global_env->active_reservations){
5616 + res = list_entry(pos, struct reservation, list);
5617 + if (!res->reported){
5618 + TRACE_TASK(res->tsk, "R%d is ACTIVE LIST with state %d scheduled on %d and hasn't reported\n", res->id, res->state, res->scheduled_on);
5619 + }
5620 + }
5621 + list_for_each(pos, &_global_env->depleted_reservations){
5622 + res = list_entry(pos, struct reservation, list);
5623 + if (!res->reported){
5624 + TRACE_TASK(res->tsk, "R%d is DEPLETED LIST and hasn't reported\n", res->id);
5625 + }
5626 + }
5627 + list_for_each(pos, &_global_env->inactive_reservations){
5628 + res = list_entry(pos, struct reservation, list);
5629 + if (!res->reported){
5630 + TRACE_TASK(res->tsk, "R%d is INACTIVE LIST and hasn't reported\n", res->id);
5631 + }
5632 + }
5633 + }
5634 +			if( ready ){ //level C is throttled: all old-mode reservations have reported
5635 + lt_t new_mode_basetime = get_release(current);
5636 + mode = requested_mode;
5637 + TRACE("Mode has been changed.\n");
5638 + mode_changed = true;
5639 + _global_env = &_global_env_modes[mode];
5640 +				/* reset res->reported for the new mode's global tasks */
5641 + list_for_each(pos, &_global_env->active_reservations){
5642 + res = list_entry(pos, struct reservation, list);
5643 + release_at(res->tsk, new_mode_basetime);
5644 + res->reported = 0;
5645 + }
5646 + list_for_each(pos, &_global_env->depleted_reservations){
5647 + res = list_entry(pos, struct reservation, list);
5648 + release_at(res->tsk, new_mode_basetime);
5649 + res->reported = 0;
5650 + }
5651 + list_for_each(pos, &_global_env->inactive_reservations){
5652 + res = list_entry(pos, struct reservation, list);
5653 + release_at(res->tsk, new_mode_basetime);
5654 + res->reported = 0;
5655 + }
5656 + gmp_update_time(_global_env, litmus_clock());
5657 +
5658 + state->sup_env = &state->sup_env_modes[mode];
5659 + list_for_each(pos, &state->sup_env->active_reservations){
5660 + res = list_entry(pos, struct reservation, list);
5661 + release_at(res->tsk, new_mode_basetime);
5662 + }
5663 + list_for_each(pos, &state->sup_env->depleted_reservations){
5664 + res = list_entry(pos, struct reservation, list);
5665 + release_at(res->tsk, new_mode_basetime);
5666 + }
5667 + list_for_each(pos, &state->sup_env->inactive_reservations){
5668 + res = list_entry(pos, struct reservation, list);
5669 + release_at(res->tsk, new_mode_basetime);
5670 + }
5671 + sup_update_time(state->sup_env, litmus_clock());
5672 + sched_trace_enact_mode(current);
5673 + TRACE("ENACT\n");
5674 + }
5675 + raw_spin_unlock(&mode_lock);
5676 + raw_spin_unlock(&global_lock);
5677 + raw_spin_unlock_irqrestore(&state->lock, flags);
5678 +
5679 + raw_spin_lock(&state->lock);
5680 + mc2_update_timer_and_unlock(state);
5681 + }
5682 + this_cpu_inc(mode_counter);
5683 + }
5684 + else if (!mode_poll_exited) {
5685 + unsigned long *cpu0_counter = cpu_0_mode_counter();
5686 + unsigned long my_val;
5687 + this_cpu_inc(mode_counter);
5688 + my_val = this_cpu_read(mode_counter);
5689 + //spin, wait for CPU 0 to stabilize mode decision
5690 + //before scheduling next hyperperiod
5691 +
5692 + while (*cpu0_counter < my_val && !mode_poll_exited){
5693 + udelay(1);
5694 + }
5695 +		TRACE("CPU%d counter check. %lu\n", state->cpu, this_cpu_read(mode_counter));
5696 + if (mode_changed) {
5697 + lt_t new_mode_basetime = get_release(current);
5698 + TRACE("CPU%d mode changed\n",state->cpu);
5699 + hrtimer_cancel(&state->timer); //stop listening to old mode timers
5700 + TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock());
5701 + raw_spin_lock_irqsave(&state->lock, flags);
5702 + state->sup_env = &state->sup_env_modes[mode];
5703 + list_for_each(pos, &state->sup_env->active_reservations){
5704 + res = list_entry(pos, struct reservation, list);
5705 + release_at(res->tsk, new_mode_basetime);
5706 + }
5707 + list_for_each(pos, &state->sup_env->depleted_reservations){
5708 + res = list_entry(pos, struct reservation, list);
5709 + release_at(res->tsk, new_mode_basetime);
5710 + }
5711 + list_for_each(pos, &state->sup_env->inactive_reservations){
5712 + res = list_entry(pos, struct reservation, list);
5713 + release_at(res->tsk, new_mode_basetime);
5714 + }
5715 + sup_update_time(state->sup_env, litmus_clock());
5716 + raw_spin_unlock_irqrestore(&state->lock, flags);
5717 +
5718 + raw_spin_lock(&state->lock);
5719 + mc2_update_timer_and_unlock(state);
5720 +
5721 + }
5722 + }
5723 + else {
5724 +		TRACE("CPU%d exits sys_enact_mode(): mode poll task exited. mode_changed=%d\n", state->cpu, mode_changed);
5725 + return 0;
5726 + }
5727 +
5728 + return 0;
5729 +}
5730 +
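+/*
+ * Usage sketch (illustrative only, not part of this patch): the
+ * level-A mode-poll task described above would invoke this syscall
+ * once per level-A/B hyperperiod, e.g. via a liblitmus-style wrapper
+ * (the wrapper names here are hypothetical):
+ *
+ *	while (keep_running) {
+ *		sleep_next_period();
+ *		enact_mode();
+ *	}
+ */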
5731 +
5732 +/*
5733 + * Called from a non-real-time program.
5734 + * Protected by an exclusive lock so a request cannot occur while a mode change is being enacted.
5735 + */
5736 +
5737 +#define GET_MODE -1
5738 +#define GET_REP -2
5739 +#define GET_SIZE -3
5740 +
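+/* Negative arguments act as queries instead of mode-change requests:
+ * GET_MODE returns the currently executing mode, GET_REP the number of
+ * reservations that have not yet reported for a pending change, and
+ * GET_SIZE the number of level-C reservations in the current mode. */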
5741 +asmlinkage long sys_request_mode(int new_mode){
5742 + TRACE("Requesting mode %d\n", new_mode);
5743 + preempt_disable();
5744 + raw_spin_lock(&mode_lock);
5745 + if (new_mode == GET_MODE){
5746 + int tmp_mode = mode;
5747 + raw_spin_unlock(&mode_lock);
5748 + preempt_enable();
5749 + return tmp_mode;
5750 + }
5751 + if (new_mode == GET_REP){
5752 + int tmp_rep = res_reported;
5753 + raw_spin_unlock(&mode_lock);
5754 + preempt_enable();
5755 + return tmp_rep;
5756 + }
5757 + if (new_mode == GET_SIZE){
5758 + int tmp_size = mode_sizes[mode];
5759 + raw_spin_unlock(&mode_lock);
5760 + preempt_enable();
5761 + return tmp_size;
5762 + }
5763 + if (pending){
5764 + TRACE("Request to %d denied due to pending to %d\n", new_mode, requested_mode);
5765 + raw_spin_unlock(&mode_lock);
5766 + preempt_enable();
5767 +		TRACE("MCR rejected because the previous MCR is pending.\n");
5768 + return -EAGAIN;
5769 + }
5770 + if (mode == new_mode){
5771 +		TRACE("Request to %d denied because I am in that mode already\n", new_mode);
5772 + raw_spin_unlock(&mode_lock);
5773 + preempt_enable();
5774 + return -EINVAL;
5775 + }
5776 + requested_mode = new_mode;
5777 + TRACE("MCR received: %d, old:%d\n",requested_mode,mode);
5778 + res_reported = mode_sizes[mode];
5779 + TRACE_CUR("RES_REPORTED = %d\n",res_reported);
5780 + seen_once = false;
5781 + raw_spin_unlock(&mode_lock);
5782 + preempt_enable();
5783 + return 0;
5784 +}
5785 +
5786 +
5787 +/* get_mc2_state - get the task's state */
5788 +static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
5789 +{
5790 + struct mc2_task_state* tinfo;
5791 +
5792 + tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state;
5793 +
5794 + if (tinfo)
5795 + return tinfo;
5796 + else
5797 + return NULL;
5798 +}
5799 +
5800 +/* get_task_crit_level - return the criticality level of a task */
5801 +static enum crit_level get_task_crit_level(struct task_struct *tsk)
5802 +{
5803 + struct mc2_task *mp;
5804 +
5805 + if (!tsk || !is_realtime(tsk))
5806 + return NUM_CRIT_LEVELS;
5807 +
5808 + mp = tsk_rt(tsk)->mc2_data;
5809 +
5810 + if (!mp)
5811 + return NUM_CRIT_LEVELS;
5812 + else
5813 + return mp->crit;
5814 +}
5815 +
5816 +/* task_departs - remove a task from its reservation
5817 + * If the job has remaining budget, convert it to a ghost job
5818 + * and update crit_entries[]
5819 + *
5820 + * @job_complete indicates whether the job completed
5821 + */
5822 +static void task_departs(struct task_struct *tsk, int job_complete)
5823 +{
5824 + struct mc2_task_state* tinfo = get_mc2_state(tsk);
5825 + struct reservation* res = NULL;
5826 + struct reservation_client *client = NULL;
5827 + int i;
5828 + BUG_ON(!is_realtime(tsk));
5829 +
5830 + for(i = 0; i < NR_MODES; i++){
5831 + if (! in_mode(tsk, i) && i != 0)
5832 + continue;
5833 + res = tinfo->res_info[i].client.reservation;
5834 + client = &tinfo->res_info[i].client;
5835 + BUG_ON(!res);
5836 + BUG_ON(!client);
5837 +
5838 + if (job_complete)
5839 + res->cur_budget = 0;
5840 +
5841 + res->ops->client_departs(res, client, job_complete);
5842 + }
5843 +
5844 + tinfo->has_departed = true;
5845 + TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
5846 +}
5847 +
5848 +/* task_arrives - put a task into its reservation
5849 + * If the job was a ghost job, remove it from crit_entries[]
5850 + */
5851 +static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
5852 +{
5853 + struct mc2_task_state* tinfo = get_mc2_state(tsk);
5854 + struct reservation* res;
5855 + struct reservation_client *client;
5856 + enum crit_level lv = get_task_crit_level(tsk);
5857 + int i;
5858 +
5859 + switch(lv) {
5860 + case CRIT_LEVEL_A:
5861 + case CRIT_LEVEL_B:
5862 + TS_RELEASE_START;
5863 + break;
5864 + case CRIT_LEVEL_C:
5865 + TS_RELEASE_C_START;
5866 + break;
5867 + default:
5868 + break;
5869 + }
5870 +
5871 + tinfo->has_departed = false;
5872 +
5873 + TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
5874 +
5875 + for(i = 0; i < NR_MODES; i++){
5876 + if (! in_mode(tsk, i) && i != 0)
5877 + continue;
5878 + res = tinfo->res_info[i].client.reservation;
5879 + client = &tinfo->res_info[i].client;
5880 +
5881 + res->ops->client_arrives(res, client);
5882 + }
5883 +
5884 + switch(lv) {
5885 + case CRIT_LEVEL_A:
5886 + case CRIT_LEVEL_B:
5887 + TS_RELEASE_END;
5888 + break;
5889 + case CRIT_LEVEL_C:
5890 + TS_RELEASE_C_END;
5891 + break;
5892 + default:
5893 + break;
5894 + }
5895 +}
5896 +
5897 +/* get_lowest_prio_cpu - return the lowest priority cpu
5898 + * This will be used for scheduling level-C tasks.
5899 + * If all CPUs are running tasks that have
5900 + * higher priority than level C, return NO_CPU.
5901 + */
5902 +static int get_lowest_prio_cpu(lt_t priority)
5903 +{
5904 + struct cpu_entry *ce;
5905 + int cpu, ret = NO_CPU;
5906 + lt_t latest_deadline = 0;
5907 +
5908 + if (priority == LITMUS_NO_PRIORITY)
5909 + return ret;
5910 +
5911 + ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
5912 + if (!ce->will_schedule && !ce->scheduled) {
5913 + TRACE("CPU %d (local) is the lowest (Idle)!\n", ce->cpu);
5914 + return ce->cpu;
5915 + } else {
5916 + TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
5917 + }
5918 +
5919 + for_each_online_cpu(cpu) {
5920 + ce = &_lowest_prio_cpu.cpu_entries[cpu];
5921 + /* If a CPU will call schedule() in the near future, we don't
5922 + return that CPU. */
5923 +
5924 + if (!ce->will_schedule) {
5925 + if (!ce->scheduled) {
5926 + /* Idle cpu, return this. */
5927 + TRACE("CPU %d is the lowest (Idle)!\n", ce->cpu);
5928 + return ce->cpu;
5929 + } else if (ce->lv == CRIT_LEVEL_C &&
5930 + ce->deadline > latest_deadline) {
5931 + latest_deadline = ce->deadline;
5932 + ret = ce->cpu;
5933 + }
5934 + }
5935 + }
5936 +
5937 + TRACE("CPU %d is the lowest! deadline = %llu, my priority = %llu\n", ret, latest_deadline, priority);
5938 +
5939 + if (priority >= latest_deadline) {
5940 + TRACE("CPU %d is running a higher-priority task. return NO_CPU\n", ret);
5941 + ret = NO_CPU;
5942 + }
5943 +
5944 + return ret;
5945 +}
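+
+/* Note on get_lowest_prio_cpu() above: priority values are compared
+ * against deadlines (EDF), so a numerically larger value means a later
+ * deadline and hence a lower priority; the CPU running the level-C
+ * task with the latest deadline is the preferred preemption target. */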
5946 +
5947 +/* NOTE: drops state->lock */
5948 +/* mc2_update_timer_and_unlock - set a timer and unlock state->lock.
5949 + * Whenever res_env.current_time is updated,
5950 + * we check next_scheduler_update and set
5951 + * a timer.
5952 + * If there exists a global event that is
5953 + * not armed on any CPU and g_timer is not
5954 + * active, set a g_timer for that event.
5955 + */
5956 +static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
5957 +{
5958 + int local, cpus;
5959 + lt_t update, now;
5960 + struct next_timer_event *event, *next;
5961 + int reschedule[NR_CPUS];
5962 + unsigned long flags;
5963 +
5964 + local_irq_save(flags);
5965 +
5966 + for (cpus = 0; cpus<NR_CPUS; cpus++)
5967 + reschedule[cpus] = 0;
5968 +
5969 + update = state->sup_env->next_scheduler_update;
5970 + now = state->sup_env->env.current_time;
5971 +
5972 + /* Be sure we're actually running on the right core,
5973 +	 * as this function is also called from mc2_task_resume(),
5974 + * which might be called on any CPU when a thread resumes.
5975 + */
5976 + local = local_cpu_state() == state;
5977 +
5978 + raw_spin_lock(&global_lock);
5979 +
5980 + list_for_each_entry_safe(event, next, &_global_env->next_events, list) {
5981 +		/* If the event time has already passed, we call schedule() on
5982 +		   the lowest priority cpu */
5983 + if (event->next_update >= update) {
5984 + break;
5985 + }
5986 +
5987 + if (event->next_update < litmus_clock()) {
5988 + if (event->timer_armed_on == NO_CPU) {
5989 + struct reservation *res = gmp_find_by_id(_global_env, event->id);
5990 + int cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
5991 + list_del(&event->list);
5992 + kfree(event);
5993 + if (cpu != NO_CPU) {
5994 + _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
5995 + reschedule[cpu] = 1;
5996 + }
5997 + }
5998 + } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) {
5999 + event->timer_armed_on = state->cpu;
6000 + update = event->next_update;
6001 + break;
6002 + }
6003 + }
6004 +
6005 + /* Must drop state lock before calling into hrtimer_start(), which
6006 + * may raise a softirq, which in turn may wake ksoftirqd. */
6007 +
6008 + raw_spin_unlock(&global_lock);
6009 + local_irq_restore(flags);
6010 + raw_spin_unlock(&state->lock);
6011 +
6012 + if ((update <= now) || reschedule[state->cpu]) {
6013 + reschedule[state->cpu] = 0;
6014 + litmus_reschedule(state->cpu);
6015 + } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
6016 + /* Reprogram only if not already set correctly. */
6017 + if (!hrtimer_active(&state->timer) ||
6018 + ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
6019 + TRACE("canceling timer...at %llu\n",
6020 + ktime_to_ns(hrtimer_get_expires(&state->timer)));
6021 + hrtimer_cancel(&state->timer);
6022 + TRACE("setting scheduler timer for %llu\n", update);
6023 + /* We cannot use hrtimer_start() here because the
6024 + * wakeup flag must be set to zero. */
6025 + __hrtimer_start_range_ns(&state->timer,
6026 + ns_to_ktime(update),
6027 + 0 /* timer coalescing slack */,
6028 + HRTIMER_MODE_ABS_PINNED,
6029 + 0 /* wakeup */);
6030 + if (update < litmus_clock()) {
6031 + /* uh oh, timer expired while trying to set it */
6032 + TRACE("timer expired during setting "
6033 + "update:%llu now:%llu actual:%llu\n",
6034 + update, now, litmus_clock());
6035 + /* The timer HW may not have been reprogrammed
6036 + * correctly; force rescheduling now. */
6037 + litmus_reschedule(state->cpu);
6038 + }
6039 + }
6040 + } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
6041 + /* Poke remote core only if timer needs to be set earlier than
6042 + * it is currently set.
6043 + */
6044 + TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
6045 + "active:%d, set:%llu)\n",
6046 + state->cpu, update, hrtimer_active(&state->timer),
6047 + ktime_to_ns(hrtimer_get_expires(&state->timer)));
6048 + if (!hrtimer_active(&state->timer) ||
6049 + ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
6050 + TRACE("poking CPU %d so that it can update its "
6051 + "scheduling timer (active:%d, set:%llu)\n",
6052 + state->cpu,
6053 + hrtimer_active(&state->timer),
6054 + ktime_to_ns(hrtimer_get_expires(&state->timer)));
6055 + }
6056 + }
6057 +}
6058 +
6059 +/* update_cpu_prio - Update cpu's priority
6060 + * When a cpu picks a new task, call this function
6061 + * to update cpu priorities.
6062 + */
6063 +static void update_cpu_prio(struct mc2_cpu_state *state)
6064 +{
6065 + struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
6066 + enum crit_level lv = get_task_crit_level(state->scheduled);
6067 +
6068 + if (!state->scheduled) {
6069 + /* cpu is idle. */
6070 + ce->scheduled = NULL;
6071 + ce->deadline = ULLONG_MAX;
6072 + ce->lv = NUM_CRIT_LEVELS;
6073 + } else if (lv == CRIT_LEVEL_C) {
6074 + ce->scheduled = state->scheduled;
6075 + ce->deadline = get_deadline(state->scheduled);
6076 + ce->lv = lv;
6077 + } else if (lv < CRIT_LEVEL_C) {
6078 + /* If cpu is running level A or B tasks, it is not eligible
6079 + to run level-C tasks */
6080 + ce->scheduled = state->scheduled;
6081 + ce->deadline = 0;
6082 + ce->lv = lv;
6083 + }
6084 +}
6085 +
6086 +/* on_scheduling_timer - timer event for partitioned tasks
6087 + */
6088 +static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
6089 +{
6090 + unsigned long flags;
6091 + enum hrtimer_restart restart = HRTIMER_NORESTART;
6092 + struct mc2_cpu_state *state;
6093 + lt_t update, now;
6094 + int global_schedule_now;
6095 + int reschedule[NR_CPUS];
6096 + int cpus;
6097 +
6098 + if (mode_changed)
6099 + return restart;
6100 +
6101 + for (cpus = 0; cpus<NR_CPUS; cpus++)
6102 + reschedule[cpus] = 0;
6103 +
6104 + state = container_of(timer, struct mc2_cpu_state, timer);
6105 +
6106 + /* The scheduling timer should only fire on the local CPU, because
6107 + * otherwise deadlocks via timer_cancel() are possible.
6108 + * Note: this does not interfere with dedicated interrupt handling, as
6109 + * even under dedicated interrupt handling scheduling timers for
6110 + * budget enforcement must occur locally on each CPU.
6111 + */
6112 + BUG_ON(state->cpu != raw_smp_processor_id());
6113 +
6114 + TS_ISR_START;
6115 +
6116 + TRACE("Timer fired at %llu\n", litmus_clock());
6117 + raw_spin_lock_irqsave(&state->lock, flags);
6118 + now = litmus_clock();
6119 + sup_update_time(state->sup_env, now);
6120 +
6121 + update = state->sup_env->next_scheduler_update;
6122 + now = state->sup_env->env.current_time;
6123 +
6124 + if (update <= now) {
6125 + litmus_reschedule_local();
6126 + } else if (update != SUP_NO_SCHEDULER_UPDATE) {
6127 + hrtimer_set_expires(timer, ns_to_ktime(update));
6128 + restart = HRTIMER_RESTART;
6129 + }
6130 +
6131 + raw_spin_lock(&global_lock);
6132 + global_schedule_now = gmp_update_time(_global_env, litmus_clock());
6133 + BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
6134 +
6135 + /* Find the lowest cpu, and call reschedule */
6136 + while (global_schedule_now--) {
6137 + int cpu = get_lowest_prio_cpu(0);
6138 + if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
6139 + _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
6140 +			/* skip the local CPU when its own timer update is
6141 +			 * still in the future; otherwise request a reschedule */
6142 +			if (cpu != state->cpu || update <= now)
6143 +				reschedule[cpu] = 1;
6144 + }
6145 + }
6146 + raw_spin_unlock(&global_lock);
6147 + raw_spin_unlock_irqrestore(&state->lock, flags);
6148 +
6149 + TS_ISR_END;
6150 +
6151 + for (cpus = 0; cpus<NR_CPUS; cpus++) {
6152 + if (reschedule[cpus]) {
6153 + litmus_reschedule(cpus);
6154 + }
6155 + }
6156 +
6157 + return restart;
6158 +}
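+
+/* Design note: the reschedule[] array defers the litmus_reschedule()
+ * calls until global_lock and state->lock have been released;
+ * rescheduling a remote CPU while holding these locks could deadlock
+ * against that CPU's own scheduling path. */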
6159 +
6160 +#define INIT_PHASE_LENGTH_NS (1000000000)
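+/* one init-phase period (1 s in ns); during initialization, releases
+ * are spaced by INIT_PHASE_LENGTH_NS * job_no from the synchronous
+ * release time (see mc2_complete_job() below) */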
6161 +
6162 +/* mc2_complete_job - syscall backend for job completions
6163 + */
6164 +static long mc2_complete_job(void)
6165 +{
6166 + ktime_t next_release;
6167 + long err;
6168 +
6169 + enum crit_level lv = get_task_crit_level(current);
6170 +
6171 + raw_spin_lock(&mode_lock);
6172 + tsk_rt(current)->completed = 1;
6173 + raw_spin_unlock(&mode_lock);
6174 +
6175 + if (atomic_read(&num_sync_released) == 0 && mode != 0) {
6176 + tsk_rt(current)->sporadic_release = 0;
6177 + TRACE_CUR("num_sync_released is 0\n");
6178 + }
6179 +
6180 +	/* If this is the first job instance, we need to reset the
6181 +	   replenishment time to the next release time */
6182 + if (tsk_rt(current)->sporadic_release) {
6183 + struct mc2_cpu_state *state;
6184 + struct mc2_task_state *tinfo;
6185 + struct reservation *res = NULL;
6186 + unsigned long flags;
6187 +
6188 + local_irq_save(flags);
6189 +
6190 + tinfo = get_mc2_state(current);
6191 +
6192 + if (lv < CRIT_LEVEL_C) {
6193 + int i;
6194 + state = cpu_state_for(tinfo->cpu);
6195 + raw_spin_lock(&state->lock);
6196 + for (i = 0; i<NR_MODES; i++) {
6197 + if (in_mode(current,i) || i == 0) {
6198 + state->sup_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
6199 + }
6200 + }
6201 + res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
6202 +
6203 + }
6204 + else if (lv == CRIT_LEVEL_C) {
6205 + int i;
6206 + state = local_cpu_state();
6207 + raw_spin_lock(&state->lock);
6208 + raw_spin_lock(&global_lock);
6209 + for (i = 0; i < NR_MODES; i++) {
6210 + if (in_mode(current,i) || i == 0) {
6211 + _global_env_modes[i].env.time_zero = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
6212 + }
6213 + }
6214 + res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
6215 + }
6216 + else
6217 + BUG();
6218 +
6219 + /* set next_replenish to synchronous release time */
6220 + BUG_ON(!res);
6221 + res->next_replenishment = tsk_rt(current)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(current)->job_params.job_no);
6222 +/*
6223 + if (get_task_crit_level(current) == CRIT_LEVEL_A) {
6224 + struct table_driven_reservation *tdres;
6225 + tdres = container_of(res, struct table_driven_reservation, res);
6226 + tdres->next_interval = 0;
6227 + tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
6228 + res->next_replenishment += tdres->intervals[0].start;
6229 + }
6230 +*/
6231 + res->cur_budget = 0;
6232 + res->env->change_state(res->env, res, RESERVATION_DEPLETED);
6233 +
6234 + TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
6235 + if (lv == CRIT_LEVEL_C){
6236 + raw_spin_unlock(&global_lock);
6237 + }
6238 + raw_spin_unlock(&state->lock);
6239 + local_irq_restore(flags);
6240 + }
6241 +
6242 + sched_trace_task_completion(current, 0);
6243 + /* update the next release time and deadline */
6244 + prepare_for_next_period(current);
6245 + sched_trace_task_release(current);
6246 + next_release = ns_to_ktime(get_release(current));
6247 + preempt_disable();
6248 + TRACE_CUR("next_release=%llu\n", get_release(current));
6249 +
6250 + /*
6251 +	 * Changed logic for the mode-switch case:
6252 +	 * during a mode switch, we do not want to release a
6253 +	 * new job even if the release time has passed
6254 + */
6255 +
6256 + if (lv == CRIT_LEVEL_C && pending) {
6257 + struct reservation *res = NULL;
6258 +
6259 + raw_spin_lock(&mode_lock);
6260 + res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id);
6261 + if (res && !res->reported){
6262 + res_reported--;
6263 + TRACE_CUR("RES_REPORTED = %d\n", res_reported);
6264 + res->reported = 1;
6265 + }
6266 + raw_spin_unlock(&mode_lock);
6267 + }
6268 +
6269 +
6270 + if (get_release(current) > litmus_clock()) {
6271 + /* sleep until next_release */
6272 + set_current_state(TASK_INTERRUPTIBLE);
6273 + preempt_enable_no_resched();
6274 + TRACE_CUR("Sleep until %llu\n", next_release);
6275 + err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
6276 + } else {
6277 + /* release the next job immediately */
6278 + err = 0;
6279 + TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
6280 + preempt_enable();
6281 + }
6282 +
6283 + TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
6284 +
6285 + raw_spin_lock(&mode_lock);
6286 + tsk_rt(current)->completed = 0;
6287 + raw_spin_unlock(&mode_lock);
6288 +
6289 + return err;
6290 +}
6291 +
6292 +/* mc2_dispatch - Select the next task to schedule.
6293 + */
6294 +struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state)
6295 +{
6296 + struct reservation *res, *next;
6297 + struct task_struct *tsk = NULL;
6298 + enum crit_level lv;
6299 + lt_t time_slice;
6300 +
6301 + list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
6302 + if (res->state == RESERVATION_ACTIVE) {
6303 + tsk = res->ops->dispatch_client(res, &time_slice);
6304 + if (likely(tsk)) {
6305 + lv = get_task_crit_level(tsk);
6306 + if (lv == NUM_CRIT_LEVELS) {
6307 + sup_scheduler_update_after(sup_env, res->cur_budget);
6308 + return tsk;
6309 + } else {
6310 + sup_scheduler_update_after(sup_env, res->cur_budget);
6311 + res->blocked_by_ghost = 0;
6312 + res->is_ghost = NO_CPU;
6313 + return tsk;
6314 + }
6315 + }
6316 + }
6317 + }
6318 +
6319 + return NULL;
6320 +}
6321 +
6322 +struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
6323 +{
6324 + struct reservation *res, *next;
6325 + struct task_struct *tsk = NULL;
6326 + enum crit_level lv;
6327 + lt_t time_slice;
6328 +
6329 + list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) {
6330 + BUG_ON(!res);
6331 + if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
6332 + tsk = res->ops->dispatch_client(res, &time_slice);
+		/* tsk may be NULL here; check it before dereferencing it in in_mode() */
6333 +		if (pending && res->reported && tsk && !in_mode(tsk, requested_mode)){
6334 + TRACE_TASK(tsk, "Rejected because task not in requested mode %d\n", requested_mode);
6335 + continue;
6336 + }
6337 + if (likely(tsk)) {
6338 + lv = get_task_crit_level(tsk);
6339 + if (lv != CRIT_LEVEL_C)
6340 + BUG();
6341 +#if BUDGET_ENFORCEMENT_AT_C
6342 + gmp_add_event_after(_global_env, res->cur_budget, res->id, EVENT_DRAIN);
6343 +#endif
6344 + res->event_added = 1;
6345 + res->blocked_by_ghost = 0;
6346 + res->is_ghost = NO_CPU;
6347 + TRACE_TASK(res->tsk, "R%d global dispatched on %d\n", res->id, state->cpu);
6348 + res->scheduled_on = state->cpu;
6349 + return tsk;
6350 + }
6351 + }
6352 + }
6353 + return NULL;
6354 +}
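+
+/* Note: while a change is pending, a level-C task that has already
+ * reported and is not part of the requested mode is skipped above;
+ * this drains old-mode jobs without dispatching new ones for tasks
+ * that do not exist in the new mode. */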
6355 +
6356 +static inline void pre_schedule(struct task_struct *prev, int cpu)
6357 +{
6358 + TS_SCHED_A_START;
6359 + TS_SCHED_C_START;
6360 +
6361 + if (!prev || !is_realtime(prev))
6362 + return;
6363 +
6364 + do_partition(CRIT_LEVEL_C, cpu);
6365 +}
6366 +
6367 +static inline void post_schedule(struct task_struct *next, int cpu)
6368 +{
6369 + enum crit_level lev;
6370 + if ((!next) || !is_realtime(next))
6371 + return;
6372 +
6373 + lev = get_task_crit_level(next);
6374 + if (is_mode_poll_task(next)) {
6375 + lev = MODE_POLL_TASK;
6376 + }
6377 +
6378 + do_partition(lev, cpu);
6379 +
6380 + switch(lev) {
6381 + case CRIT_LEVEL_A:
6382 + case CRIT_LEVEL_B:
6383 + case MODE_POLL_TASK:
6384 + TS_SCHED_A_END(next);
6385 + break;
6386 + case CRIT_LEVEL_C:
6387 + TS_SCHED_C_END(next);
6388 + break;
6389 + default:
6390 + break;
6391 + }
6392 +
6393 +}
6394 +
6395 +/* mc2_schedule - main scheduler function. pick the next task to run
6396 + */
6397 +static struct task_struct* mc2_schedule(struct task_struct * prev)
6398 +{
6399 + int np, blocks, exists;
6400 + /* next == NULL means "schedule background work". */
6401 + lt_t now = litmus_clock();
6402 + struct mc2_cpu_state *state = local_cpu_state();
6403 +
6404 + raw_spin_lock(&state->lock);
6405 +
6406 + pre_schedule(prev, state->cpu);
6407 +
6408 + BUG_ON(state->scheduled && state->scheduled != prev);
6409 + BUG_ON(state->scheduled && !is_realtime(prev));
6410 +
6411 + /* (0) Determine state */
6412 + exists = state->scheduled != NULL;
6413 + blocks = exists && !is_current_running();
6414 + np = exists && is_np(state->scheduled);
6415 +
6416 + /* update time */
6417 + state->sup_env->will_schedule = true;
6418 + sup_update_time(state->sup_env, now);
6419 +
6420 + if (is_realtime(current) && blocks) {
6421 + if (get_task_crit_level(current) == CRIT_LEVEL_C){
6422 + raw_spin_lock(&global_lock);
6423 + }
6424 + task_departs(current, is_completed(current));
6425 + if (get_task_crit_level(current) == CRIT_LEVEL_C){
6426 + raw_spin_unlock(&global_lock);
6427 + }
6428 + }
6429 +
6430 + /* figure out what to schedule next */
6431 + if (!np)
6432 + state->scheduled = mc2_dispatch(state->sup_env, state);
6433 +
6434 + if (!state->scheduled) {
6435 + raw_spin_lock(&global_lock);
6436 + if (is_realtime(prev))
6437 + gmp_update_time(_global_env, now);
6438 + state->scheduled = mc2_global_dispatch(state);
6439 + _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
6440 + update_cpu_prio(state);
6441 + raw_spin_unlock(&global_lock);
6442 + } else {
6443 + raw_spin_lock(&global_lock);
6444 + _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
6445 + update_cpu_prio(state);
6446 + raw_spin_unlock(&global_lock);
6447 + }
6448 +
6449 + /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
6450 + sched_state_task_picked();
6451 +
6452 + /* program scheduler timer */
6453 + state->sup_env->will_schedule = false;
6454 +
6455 + /* NOTE: drops state->lock */
6456 + mc2_update_timer_and_unlock(state);
6457 +
6458 + raw_spin_lock(&state->lock);
6459 + if (prev != state->scheduled && is_realtime(prev)) {
6460 + struct mc2_task_state* tinfo = get_mc2_state(prev);
6461 + struct reservation* res = tinfo->res_info[mode].client.reservation;
6462 + if (res) {
6463 + res->scheduled_on = NO_CPU;
6464 + }
6465 + TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock());
6466 + /* if prev is preempted and a global task, find the lowest cpu and reschedule */
6467 + if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
6468 + int cpu;
6469 + raw_spin_lock(&global_lock);
6470 + cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
6471 + if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
6472 + _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
6473 + resched_cpu[cpu] = 1;
6474 + }
6475 + raw_spin_unlock(&global_lock);
6476 + }
6477 + }
6478 +
6479 + post_schedule(state->scheduled, state->cpu);
6480 +
6481 + raw_spin_unlock(&state->lock);
6482 + if (state->scheduled) {
6483 + TRACE_TASK(state->scheduled, "scheduled.\n");
6484 + }
6485 +
6486 + return state->scheduled;
6487 +}
6488 +
6489 +static void resume_legacy_task_model_updates(struct task_struct *tsk)
6490 +{
6491 + lt_t now;
6492 + if (is_sporadic(tsk)) {
6493 + /* If this sporadic task was gone for a "long" time and woke up past
6494 + * its deadline, then give it a new budget by triggering a job
6495 + * release. This is purely cosmetic and has no effect on the
6496 + * MC2 scheduler. */
6497 +
6498 + now = litmus_clock();
6499 + if (is_tardy(tsk, now)) {
6500 + release_at(tsk, now);
6501 + }
6502 + }
6503 +}
6504 +
6505 +/* mc2_task_resume - Called when the state of tsk changes back to
6506 + * TASK_RUNNING. We need to requeue the task.
6507 + */
6508 +static void mc2_task_resume(struct task_struct *tsk)
6509 +{
6510 + unsigned long flags;
6511 + struct mc2_task_state* tinfo;
6512 + struct mc2_cpu_state *state;
6513 +
6514 + TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
6515 +
6516 + preempt_disable();
6517 + tinfo = get_mc2_state(tsk);
6518 + if (tinfo->cpu != -1)
6519 + state = cpu_state_for(tinfo->cpu);
6520 + else
6521 + state = local_cpu_state();
6522 + preempt_enable();
6523 +
6524 + /* Requeue only if self-suspension was already processed. */
6525 + if (tinfo->has_departed)
6526 + {
6527 +#ifdef CONFIG_SCHED_OVERHEAD_TRACE
6528 + switch(get_task_crit_level(tsk)) {
6529 + case CRIT_LEVEL_A:
6530 + TS_RELEASE_LATENCY_A(get_release(tsk));
6531 + break;
6532 + case CRIT_LEVEL_B:
6533 + TS_RELEASE_LATENCY_B(get_release(tsk));
6534 + break;
6535 + case CRIT_LEVEL_C:
6536 + TS_RELEASE_LATENCY_C(get_release(tsk));
6537 + break;
6538 + default:
6539 + break;
6540 + }
6541 +#endif
6542 + /* We don't want to consider jobs in the initialization mode */
6543 + if (tsk_rt(tsk)->job_params.job_no == 2) {
6544 + tsk_mc2_data(tsk)->init_finished = 1;
6545 + atomic_dec(&num_sync_released);
6546 +
6547 + if (atomic_read(&num_sync_released) == 0) {
6548 + lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no);
6549 + sys_request_mode(1);
6550 + sched_trace_sys_start(&start);
6551 + }
6552 + }
6553 +
6554 + raw_spin_lock_irqsave(&state->lock, flags);
6555 +
6556 + /* Assumption: litmus_clock() is synchronized across cores,
6557 + * since we might not actually be executing on tinfo->cpu
6558 + * at the moment. */
6559 + if (tinfo->cpu != -1) {
6560 + sup_update_time(state->sup_env, litmus_clock());
6561 + task_arrives(state, tsk);
6562 + } else {
6563 + raw_spin_lock(&global_lock);
6564 + gmp_update_time(_global_env, litmus_clock());
6565 + task_arrives(state, tsk);
6566 + raw_spin_unlock(&global_lock);
6567 + }
6568 +
6569 + /* NOTE: drops state->lock */
6570 + TRACE_TASK(tsk, "mc2_resume()\n");
6571 + raw_spin_unlock_irqrestore(&state->lock, flags);
6572 +
6573 + raw_spin_lock(&state->lock);
6574 + mc2_update_timer_and_unlock(state);
6575 + } else {
6576 + TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
6577 + }
6578 +
6579 + resume_legacy_task_model_updates(tsk);
6580 +}
6581 +
6582 +
6583 +/* mc2_admit_task - Setup mc2 task parameters
6584 + */
6585 +static long mc2_admit_task(struct task_struct *tsk)
6586 +{
6587 + long err = 0;
6588 + unsigned long flags;
6589 + struct reservation *res;
6590 + struct mc2_cpu_state *state;
6591 + struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
6592 + struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
6593 + enum crit_level lv;
6594 + int i;
6595 +
6596 + TRACE_TASK(tsk, "MC2 admitting task\n");
6597 +	if (!tinfo)
6598 +		return -ENOMEM;
6599 +	if (!mp) {
6600 +		TRACE("mc2_admit_task: criticality level has not been set\n");
6601 +		kfree(tinfo);	/* do not leak the task state allocated above */
6602 +		return -ESRCH;
6603 +	}
6604 +
6605 + lv = mp->crit;
6606 +
6607 + if (lv < CRIT_LEVEL_C) {
6608 + state = cpu_state_for(task_cpu(tsk));
6609 + raw_spin_lock_irqsave(&state->lock, flags);
6610 +
6611 + tinfo->mc2_param.crit = mp->crit;
6612 + tinfo->cpu = task_cpu(tsk);
6613 + tinfo->has_departed = true;
6614 + tinfo->mc2_param.res_id = mp->res_id;
6615 + tinfo->mc2_param.mode_mask = mp->mode_mask;
6616 + tinfo->mc2_param.init_finished = 0;
6617 +
6618 + res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id);
6619 +
6620 + /* found the appropriate reservation */
6621 + if (res) {
6622 + /* initial values */
6623 + err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res);
6624 + }
6625 + else {
6626 + //failed to find an expected reservation
6627 + err = -ESRCH;
6628 + }
6629 +
6630 + for(i = 1; i < NR_MODES; i++){
6631 + if (!in_mode(tsk, i)){
6632 + // task not present in mode
6633 + continue;
6634 + }
6635 + res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id);
6636 +
6637 + /* found the appropriate reservation */
6638 + if (res) {
6639 + /* initial values */
6640 + err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res);
6641 + }
6642 + else{
6643 + //failed to find an expected reservation
6644 + err = -ESRCH;
6645 + }
6646 + }
6647 +
6648 + if (!err){
6649 + /* disable LITMUS^RT's per-thread budget enforcement */
6650 + tsk_rt(tsk)->plugin_state = tinfo;
6651 + tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
6652 + }
6653 +
6654 + if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
6655 + cpu_0_task_exist = true;
6656 + }
6657 + atomic_inc(&num_sync_released);
6658 + raw_spin_unlock_irqrestore(&state->lock, flags);
6659 + } else if (lv == CRIT_LEVEL_C) {
6660 + state = local_cpu_state();
6661 + raw_spin_lock_irqsave(&state->lock, flags);
6662 +
6663 + tinfo->mc2_param.crit = mp->crit;
6664 + tinfo->cpu = -1;
6665 + tinfo->has_departed = true;
6666 + tinfo->mc2_param.res_id = mp->res_id;
6667 + tinfo->mc2_param.mode_mask = mp->mode_mask;
6668 + tinfo->mc2_param.init_finished = 0;
6669 +
6670 + raw_spin_lock(&global_lock);
6671 + res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id);
6672 +
6673 + /* found the appropriate reservation */
6674 + if (res) {
6675 + /* initial values */
6676 + err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res);
6677 + }
6678 + else {
6679 + //failed to find an expected reservation
6680 + err = -ESRCH;
6681 + }
6682 +
6683 + for(i = 1; i < NR_MODES; i++){
6684 + if (!in_mode(tsk, i))
6685 + continue;
6686 + res = gmp_find_by_id(&(_global_env_modes[i]), mp->res_id);
6687 +
6688 + /* found the appropriate reservation (or vCPU) */
6689 + if (res) {
6690 + /* initial values */
6691 + err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res);
6692 +
6693 + }
6694 + }
6695 + raw_spin_unlock(&global_lock);
6696 +
6697 + if (!err){
6698 + /* disable LITMUS^RT's per-thread budget enforcement */
6699 + tsk_rt(tsk)->plugin_state = tinfo;
6700 + tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
6701 + raw_spin_lock(&mode_lock);
6702 + for(i = 1; i < NR_MODES; i++){
6703 + if (in_mode(tsk, i)){
6704 + mode_sizes[i]++;
6705 + }
6706 + }
6707 + mode_sizes[0]++;
6708 + raw_spin_unlock(&mode_lock);
6709 +
6710 + }
6711 +
6712 + atomic_inc(&num_sync_released);
6713 + raw_spin_unlock_irqrestore(&state->lock, flags);
6714 + }
6715 +
6716 + if (err)
6717 + kfree(tinfo);
6718 +
6719 + TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
6720 + return err;
6721 +}
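+
+/* Note: every task implicitly belongs to mode 0 (the initialization
+ * mode) regardless of its mode_mask, hence the "i == 0" special cases
+ * above and the unconditional mode_sizes[0] update for level-C tasks. */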
6722 +
6723 +/* mc2_task_new - a new real-time task has arrived; release the next job
6724 + * at the next reservation replenishment time
6725 + */
6726 +static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
6727 + int is_running)
6728 +{
6729 + unsigned long flags;
6730 + struct mc2_task_state* tinfo = get_mc2_state(tsk);
6731 + struct mc2_cpu_state *state;
6732 + struct reservation *res;
6733 + enum crit_level lv = get_task_crit_level(tsk);
6734 + lt_t release = 0;
6735 +
6736 + BUG_ON(lv < CRIT_LEVEL_A || lv > CRIT_LEVEL_C);
6737 +
6738 + TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
6739 + litmus_clock(), on_runqueue, is_running);
6740 +
6741 + if (tinfo->cpu == -1)
6742 + state = local_cpu_state();
6743 + else
6744 + state = cpu_state_for(tinfo->cpu);
6745 +
6746 +
6747 + if (is_running) {
6748 + state->scheduled = tsk;
6749 + /* make sure this task should actually be running */
6750 + litmus_reschedule_local();
6751 + }
6752 +
6753 + /* acquire the lock protecting the state and disable interrupts */
6754 + local_irq_save(flags);
6755 + raw_spin_lock(&state->lock);
6756 +
6757 + if (lv == CRIT_LEVEL_C) {
6758 + raw_spin_lock(&global_lock);
6759 + res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
6760 + }
6761 + else {
6762 + res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id);
6763 + }
6764 + BUG_ON(!res);
6765 +
6766 + // the current mode doesn't have this task.
6767 +	// If the current mode doesn't have this task, do not update the
6768 +	// timer; only the next release time is recorded below.
6769 + if (on_runqueue || is_running) {
6770 + /* Assumption: litmus_clock() is synchronized across cores
6771 +		 * [see comment in mc2_task_resume()] */
6772 + if (lv == CRIT_LEVEL_C) {
6773 + gmp_update_time(_global_env, litmus_clock());
6774 + }
6775 + else
6776 + sup_update_time(state->sup_env, litmus_clock());
6777 +
6778 + task_arrives(state, tsk);
6779 + if (lv == CRIT_LEVEL_C){
6780 + raw_spin_unlock(&global_lock);
6781 + }
6782 + /* NOTE: drops state->lock */
6783 + raw_spin_unlock(&state->lock);
6784 + local_irq_restore(flags);
6785 +
6786 + raw_spin_lock(&state->lock);
6787 + mc2_update_timer_and_unlock(state);
6788 + } else {
6789 + if (lv == CRIT_LEVEL_C){
6790 + raw_spin_unlock(&global_lock);
6791 + }
6792 + raw_spin_unlock(&state->lock);
6793 + local_irq_restore(flags);
6794 + }
6795 + release = res->next_replenishment;
6796 +
6797 + if (!release) {
6798 + BUG();
6799 + }
6800 + else
6801 + TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
6802 +}
6803 +
6804 +/* mc2_reservation_destroy - reservation_destroy system call backend
6805 + */
6806 +static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
6807 +{
6808 + long ret = -EINVAL;
6809 + struct mc2_cpu_state *state;
6810 + struct reservation *res = NULL, *next;
6811 + struct sup_reservation_environment *sup_env;
6812 + int found = 0;
6813 + unsigned long flags;
6814 + int i;
6815 +
6816 + if (cpu == -1) {
6817 + struct next_timer_event *event, *e_next;
6818 + local_irq_save(flags);
6819 + raw_spin_lock(&global_lock);
6820 +
6821 +		/* if the reservation is a global reservation,
6822 +		 * delete its reservation id in all modes */
6823 + for(i = 0; i < NR_MODES; i++) {
6824 + list_for_each_entry_safe(res, next, &_global_env_modes[i].depleted_reservations, list) {
6825 + if (res->id == reservation_id) {
6826 + list_del(&res->list);
6827 + kfree(res);
6828 + found = 1;
6829 + ret = 0;
6830 + }
6831 + }
6832 + if (!found) {
6833 + list_for_each_entry_safe(res, next, &_global_env_modes[i].inactive_reservations, list) {
6834 + if (res->id == reservation_id) {
6835 + list_del(&res->list);
6836 + kfree(res);
6837 + found = 1;
6838 + ret = 0;
6839 + }
6840 + }
6841 + }
6842 + if (!found) {
6843 + list_for_each_entry_safe(res, next, &_global_env_modes[i].active_reservations, list) {
6844 + if (res->id == reservation_id) {
6845 + list_del(&res->list);
6846 + kfree(res);
6847 + found = 1;
6848 + ret = 0;
6849 + }
6850 + }
6851 + }
6852 +
6853 + list_for_each_entry_safe(event, e_next, &_global_env_modes[i].next_events, list) {
6854 + if (event->id == reservation_id) {
6855 + list_del(&event->list);
6856 + kfree(event);
6857 + }
6858 + }
6859 + }
6860 +
6861 + raw_spin_unlock(&global_lock);
6862 + local_irq_restore(flags);
6863 + } else {
6864 +		/* if the reservation is a partitioned reservation */
6865 + state = cpu_state_for(cpu);
6866 + for (i = 0; i < NR_MODES; i++){
6867 + local_irq_save(flags);
6868 + raw_spin_lock(&state->lock);
6869 + sup_env = &(state->sup_env_modes[i]);
6870 + list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
6871 + if (res->id == reservation_id) {
6872 +/*
6873 + if (lv == CRIT_LEVEL_A) {
6874 + struct table_driven_reservation *tdres;
6875 + tdres = container_of(res, struct table_driven_reservation, res);
6876 + kfree(tdres->intervals);
6877 + }
6878 +*/
6879 + list_del(&res->list);
6880 + kfree(res);
6881 + found = 1;
6882 + ret = 0;
6883 + }
6884 + }
6885 + if (!found) {
6886 + list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
6887 + if (res->id == reservation_id) {
6888 +/* if (lv == CRIT_LEVEL_A) {
6889 + struct table_driven_reservation *tdres;
6890 + tdres = container_of(res, struct table_driven_reservation, res);
6891 + kfree(tdres->intervals);
6892 + }
6893 +*/
6894 + list_del(&res->list);
6895 + kfree(res);
6896 + found = 1;
6897 + ret = 0;
6898 + }
6899 + }
6900 + }
6901 + if (!found) {
6902 + list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
6903 + if (res->id == reservation_id) {
6904 +/* if (lv == CRIT_LEVEL_A) {
6905 + struct table_driven_reservation *tdres;
6906 + tdres = container_of(res, struct table_driven_reservation, res);
6907 + kfree(tdres->intervals);
6908 + }
6909 +*/
6910 + list_del(&res->list);
6911 + kfree(res);
6912 + found = 1;
6913 + ret = 0;
6914 + }
6915 + }
6916 + }
6917 +
6918 + raw_spin_unlock(&state->lock);
6919 + local_irq_restore(flags);
6920 + }
6921 + }
6922 +
6923 +	TRACE("Reservation destroyed ret = %ld\n", ret);
6924 + return ret;
6925 +}
6926 +
6927 +/* mc2_task_exit - task becomes a normal (non-real-time) task
6928 + */
6929 +static void mc2_task_exit(struct task_struct *tsk)
6930 +{
6931 + unsigned long flags;
6932 + struct mc2_task_state* tinfo = get_mc2_state(tsk);
6933 + struct mc2_cpu_state *state;
6934 + enum crit_level lv = tinfo->mc2_param.crit;
6935 + int cpu, i;
6936 +
6937 + local_irq_save(flags);
6938 + if (tinfo->cpu != -1)
6939 + state = cpu_state_for(tinfo->cpu);
6940 + else
6941 + state = local_cpu_state();
6942 +
6943 + raw_spin_lock(&state->lock);
6944 +
6945 + if (state->scheduled == tsk)
6946 + state->scheduled = NULL;
6947 +
6948 + /* remove from queues */
6949 + if (is_running(tsk)) {
6950 + /* Assumption: litmus_clock() is synchronized across cores
6951 +		 * [see comment in mc2_task_resume()] */
6952 +
6953 + /* update both global and partitioned */
6954 + if (lv < CRIT_LEVEL_C) {
6955 + sup_update_time(state->sup_env, litmus_clock());
6956 + }
6957 + else if (lv == CRIT_LEVEL_C) {
6958 + raw_spin_lock(&global_lock);
6959 + gmp_update_time(_global_env, litmus_clock());
6960 + }
6961 + task_departs(tsk, 0);
6962 + if (lv == CRIT_LEVEL_C){
6963 + raw_spin_unlock(&global_lock);
6964 + }
6965 + atomic_dec(&num_sync_released);
6966 +
6967 + /* NOTE: drops state->lock */
6968 + mc2_update_timer_and_unlock(state);
6969 + } else {
6970 + raw_spin_unlock(&state->lock);
6971 + }
6972 +
6973 + if (lv == CRIT_LEVEL_C) {
6974 + raw_spin_lock(&mode_lock);
6975 + for(i = 1; i < NR_MODES; i++){
6976 + if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) )
6977 + continue;
6978 + mode_sizes[i]--;
6979 + }
6980 + mode_sizes[0]--;
6981 + raw_spin_unlock(&mode_lock);
6982 +
6983 + for_each_online_cpu(cpu) {
6984 + state = cpu_state_for(cpu);
6985 + if (state == local_cpu_state())
6986 + continue;
6987 + raw_spin_lock(&state->lock);
6988 +
6989 + if (state->scheduled == tsk)
6990 + state->scheduled = NULL;
6991 +
6992 + raw_spin_unlock(&state->lock);
6993 + }
6994 + }
6995 +
6996 + local_irq_restore(flags);
6997 +
6998 + if (is_mode_poll_task(tsk)) {
6999 + mode_poll_exited = true;
7000 + }
7001 +
7002 + kfree(tsk_rt(tsk)->plugin_state);
7003 + tsk_rt(tsk)->plugin_state = NULL;
7004 + kfree(tsk_rt(tsk)->mc2_data);
7005 + tsk_rt(tsk)->mc2_data = NULL;
7006 +}
7007 +
7008 +/* create_polling_reservation - create a new polling reservation
7009 + */
7010 +static long create_polling_reservation(
7011 + int res_type,
7012 + struct reservation_config *config)
7013 +{
7014 + struct mc2_cpu_state *state = NULL;
7015 + struct polling_reservation *pres;
7016 + unsigned long flags;
7017 + int use_edf = config->priority == LITMUS_NO_PRIORITY;
7018 + int periodic = res_type == PERIODIC_POLLING;
7019 + long err = -EINVAL;
7020 + bool resExist = false;
7021 +
7022 + /* sanity checks */
7023 + if (config->polling_params.budget >
7024 + config->polling_params.period) {
7025 + printk(KERN_ERR "invalid polling reservation (%u): " "budget > period\n", config->id);
7026 + return -EINVAL;
7027 + }
7028 + if (config->polling_params.budget >
7029 + config->polling_params.relative_deadline
7030 + && config->polling_params.relative_deadline) {
7031 + printk(KERN_ERR "invalid polling reservation (%u): "
7032 + "budget > deadline\n", config->id);
7033 + return -EINVAL;
7034 + }
7035 + if (config->polling_params.offset >
7036 + config->polling_params.period) {
7037 + printk(KERN_ERR "invalid polling reservation (%u): "
7038 + "offset > period\n", config->id);
7039 + return -EINVAL;
7040 + }
7041 + //Added sanity check for mode
7042 + if (config->mode < 0 || config->mode >= NR_MODES){
7043 + printk(KERN_ERR "invalid polling reservation (%u): "
7044 + "Mode outside range\n", config->id);
7045 + return -EINVAL;
7046 + }
7047 +
7048 + /* Allocate before we grab a spin lock.
7049 + * Todo: would be nice to use a core-local allocation.
7050 + */
7051 + pres = kzalloc(sizeof(*pres), GFP_KERNEL);
7052 + if (!pres)
7053 + return -ENOMEM;
7054 +
7055 + TRACE("CREATE_POLLING_RESERVATION id %d mode %d\n", config->id, config->mode);
7056 + if (config->cpu != -1) {
7057 + int i, is_exist = 0;
7058 +
7059 + state = cpu_state_for(config->cpu);
7060 + raw_spin_lock_irqsave(&state->lock, flags);
7061 +
7062 +		/* check if this is the first creation of the reservation */
7063 + for (i = 0; i < NR_MODES; i++) {
7064 + if( sup_find_by_id(&(state->sup_env_modes[i]), config->id) )
7065 + is_exist = 1;
7066 + }
7067 + if (!is_exist && config->mode != 0) {
7068 + /* create mode 0 reservation first */
7069 + struct polling_reservation *pres_0 = kzalloc(sizeof(*pres_0), GFP_ATOMIC);
7070 +
7071 + TRACE_CUR("The first mode_num = %d\n",config->mode);
7072 +
7073 + if (!pres_0) {
7074 + raw_spin_unlock_irqrestore(&state->lock, flags);
7075 + kfree(pres);
7076 + return -ENOMEM;
7077 + }
7078 + polling_reservation_init(pres_0, use_edf, periodic,
7079 + config->polling_params.budget,
7080 + config->polling_params.period,
7081 + config->polling_params.relative_deadline,
7082 + config->polling_params.offset);
7083 + pres_0->res.id = config->id;
7084 + pres_0->res.blocked_by_ghost = 0;
7085 + pres_0->res.is_ghost = NO_CPU;
7086 + pres_0->res.mode = config->mode;
7087 +
7088 + if (!use_edf)
7089 + pres_0->res.priority = config->priority;
7090 + sup_add_new_reservation(&(state->sup_env_modes[0]), &pres_0->res);
7091 + TRACE_CUR("SUP reservation created R%d for mode 0 priority : %llu\n", config->id, pres_0->res.priority);
7092 + pres_0->res.reported = 0;
7093 + pres_0->res.tsk = current;
7094 + }
7095 +
7096 +		//force the reservation id to be unique within config->mode
7097 + if( sup_find_by_id(&(state->sup_env_modes[config->mode]), config->id) ){
7098 + resExist = true;
7099 + }
7100 + if (!resExist) {
7101 + polling_reservation_init(pres, use_edf, periodic,
7102 + config->polling_params.budget,
7103 + config->polling_params.period,
7104 + config->polling_params.relative_deadline,
7105 + config->polling_params.offset);
7106 + pres->res.id = config->id;
7107 + pres->res.blocked_by_ghost = 0;
7108 + pres->res.is_ghost = NO_CPU;
7109 + pres->res.mode = config->mode;
7110 + if (!use_edf)
7111 + pres->res.priority = config->priority;
7112 + sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res);
7113 + err = config->id;
7114 + TRACE_CUR("SUP reservation created R%d for mode %d priority : %llu\n", config->id, config->mode, pres->res.priority);
7115 + } else {
7116 + err = -EEXIST;
7117 + }
7118 +
7119 + raw_spin_unlock_irqrestore(&state->lock, flags);
7120 +
7121 + } else {
7122 + int i, is_exist = 0;
7123 + raw_spin_lock_irqsave(&global_lock, flags);
7124 +
7125 +		/* check if this is the first creation of the reservation */
7126 + for (i = 0; i < NR_MODES; i++) {
7127 + if(gmp_find_by_id(&(_global_env_modes[i]), config->id))
7128 + is_exist = 1;
7129 + }
7130 + if (!is_exist && config->mode != 0) {
7131 + /* create mode 0 reservation first */
7132 + struct polling_reservation *pres_0 = kzalloc(sizeof(*pres_0), GFP_ATOMIC);
7133 +
7134 + TRACE_CUR("The first mode_num = %d\n",config->mode);
7135 +
7136 + if (!pres_0) {
7137 + raw_spin_unlock_irqrestore(&global_lock, flags);
7138 + kfree(pres);
7139 + return -ENOMEM;
7140 + }
7141 + polling_reservation_init(pres_0, use_edf, periodic,
7142 + config->polling_params.budget,
7143 + config->polling_params.period,
7144 + config->polling_params.relative_deadline,
7145 + config->polling_params.offset);
7146 + pres_0->res.id = config->id;
7147 + pres_0->res.blocked_by_ghost = 0;
7148 + pres_0->res.scheduled_on = NO_CPU;
7149 + pres_0->res.is_ghost = NO_CPU;
7150 + pres_0->res.mode = config->mode;
7151 +
7152 + if (!use_edf)
7153 + pres_0->res.priority = config->priority;
7154 + gmp_add_new_reservation(&(_global_env_modes[0]), &pres_0->res);
7155 + TRACE_CUR("GMP reservation created R%d for mode 0 priority : %llu\n", config->id, pres_0->res.priority);
7156 + pres_0->res.reported = 0;
7157 + pres_0->res.tsk = current;
7158 + }
7159 +
7160 +		//force ids to be unique within the desired mode
7161 + if (gmp_find_by_id(&(_global_env_modes[config->mode]), config->id)){
7162 + resExist = true;
7163 + }
7164 + if (!resExist) {
7165 + polling_reservation_init(pres, use_edf, periodic,
7166 + config->polling_params.budget,
7167 + config->polling_params.period,
7168 + config->polling_params.relative_deadline,
7169 + config->polling_params.offset);
7170 + pres->res.id = config->id;
7171 + pres->res.blocked_by_ghost = 0;
7172 + pres->res.scheduled_on = NO_CPU;
7173 + pres->res.is_ghost = NO_CPU;
7174 + pres->res.mode = config->mode;
7175 + if (!use_edf)
7176 + pres->res.priority = config->priority;
7177 + gmp_add_new_reservation(&(_global_env_modes[config->mode]), &pres->res);
7178 + TRACE_CUR("GMP reservation created R%d for mode %d priority : %llu\n", config->id, config->mode, pres->res.priority);
7179 + err = config->id;
7180 + } else {
7181 + err = -EEXIST;
7182 + }
7183 + raw_spin_unlock_irqrestore(&global_lock, flags);
7184 + }
7185 +
7186 +
7187 + pres->res.reported = 0;
7188 + pres->res.tsk = current;
7189 +
7190 + if (err < 0)
7191 + kfree(pres);
7192 +
7193 + return err;
7194 +}
7195 +
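+/* Note on mode-0 copies: the first reservation created with a given id
+ * in a nonzero mode also instantiates a copy in mode 0, so the task it
+ * backs can run during the initialization phase before the first mode
+ * change is enacted. */
+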
7196 +#define MAX_INTERVALS 1024
7197 +
7198 +/* create_table_driven_reservation - create a table_driven reservation
7199 + */
7200 +static long create_table_driven_reservation(
7201 + struct reservation_config *config)
7202 +{
7203 + struct mc2_cpu_state *state;
7204 + //struct reservation* res = NULL;
7205 + struct table_driven_reservation *td_res = NULL;
7206 + struct lt_interval *slots = NULL;
7207 + size_t slots_size;
7208 + unsigned int i, num_slots;
7209 + unsigned long flags;
7210 + long err = -EINVAL;
7211 + bool resExist = false;
7212 +
7213 + if (!config->table_driven_params.num_intervals) {
7214 + printk(KERN_ERR "invalid table-driven reservation (%u): "
7215 + "no intervals\n", config->id);
7216 + return -EINVAL;
7217 + }
7218 +
7219 + if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
7220 + printk(KERN_ERR "invalid table-driven reservation (%u): "
7221 + "too many intervals (max: %d)\n", config->id, MAX_INTERVALS);
7222 + return -EINVAL;
7223 + }
7224 +
7225 + if (config->mode >= NR_MODES || config->mode < 0){
7226 + printk(KERN_ERR "invalid table-driven reservation (%u): "
7227 + "mode outside of range\n", config->id);
7228 + return -EINVAL;
7229 + }
7230 +
7231 + num_slots = config->table_driven_params.num_intervals;
7232 + slots_size = sizeof(slots[0]) * num_slots;
7233 + slots = kzalloc(slots_size, GFP_KERNEL);
7234 + if (!slots)
7235 + return -ENOMEM;
7236 +
7237 + td_res = kzalloc(sizeof(*td_res), GFP_KERNEL);
7238 + if (!td_res)
7239 + err = -ENOMEM;
7240 + else
7241 + err = copy_from_user(slots,
7242 + config->table_driven_params.intervals, slots_size);
7243 +
7244 + if (!err) {
7245 + /* sanity checks */
7246 + for (i = 0; !err && i < num_slots; i++)
7247 + if (slots[i].end <= slots[i].start) {
7248 + printk(KERN_ERR
7249 + "invalid table-driven reservation (%u): "
7250 + "invalid interval %u => [%llu, %llu]\n",
7251 + config->id, i,
7252 + slots[i].start, slots[i].end);
7253 + err = -EINVAL;
7254 + }
7255 +
7256 + for (i = 0; !err && i + 1 < num_slots; i++)
7257 + if (slots[i + 1].start <= slots[i].end) {
7258 + printk(KERN_ERR
7259 + "invalid table-driven reservation (%u): "
7260 + "overlapping intervals %u, %u\n",
7261 + config->id, i, i + 1);
7262 + err = -EINVAL;
7263 + }
7264 +
7265 + if (slots[num_slots - 1].end >
7266 + config->table_driven_params.major_cycle_length) {
7267 + printk(KERN_ERR
7268 + "invalid table-driven reservation (%u): last "
7269 + "interval ends past major cycle %llu > %llu\n",
7270 + config->id,
7271 + slots[num_slots - 1].end,
7272 + config->table_driven_params.major_cycle_length);
7273 + err = -EINVAL;
7274 + }
7275 + }
7276 +
7277 + if (!err) {
7278 + state = cpu_state_for(config->cpu);
7279 + raw_spin_lock_irqsave(&state->lock, flags);
7280 +
7281 +	/* enforce unique reservation IDs across all modes */
7282 +	for (i = 0; i < NR_MODES; i++) {
7283 +		if (sup_find_by_id(&(state->sup_env_modes[i]), config->id)) {
7284 + resExist = true;
7285 + break;
7286 + }
7287 + }
7288 + if (!resExist) {
7289 + table_driven_reservation_init(td_res,
7290 + config->table_driven_params.major_cycle_length,
7291 + slots, num_slots);
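+			/* td_res now references 'slots'; this is why only the
+			 * error paths below free it explicitly */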
7292 + td_res->res.id = config->id;
7293 + td_res->res.priority = config->priority;
7294 + td_res->res.blocked_by_ghost = 0;
7295 + td_res->res.mode = config->mode;
7296 + sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &td_res->res);
7297 + err = config->id;
7298 + } else {
7299 + err = -EEXIST;
7300 + }
7301 +
7302 + raw_spin_unlock_irqrestore(&state->lock, flags);
7303 + }
7304 +
7305 +	/* td_res may be NULL (its allocation can fail above); only touch it
+	 * on success to avoid a NULL dereference */
7306 +	if (err >= 0) {
+		td_res->res.reported = 0;
+		td_res->res.tsk = current;
+	}
7307 +
7308 + if (err < 0) {
7309 + kfree(slots);
7310 + kfree(td_res);
7311 + }
7312 +
7313 + return err;
7314 +}
7315 +
7316 +/* mc2_reservation_create - reservation_create system call backend
7317 + */
7318 +static long mc2_reservation_create(int res_type, void* __user _config)
7319 +{
7320 + long ret = -EINVAL;
7321 + struct reservation_config config;
7322 +
7323 + TRACE("Attempt to create reservation (%d)\n", res_type);
7324 +
7325 + if (copy_from_user(&config, _config, sizeof(config)))
7326 + return -EFAULT;
7327 +
7328 + TRACE("Attempt to create reservation id %d mode %d\n", config.id, config.mode);
7329 +
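+	/* cpu == -1 denotes a globally scheduled (level-C) reservation; the
+	 * online-CPU check only applies to partitioned reservations */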
7330 + if (config.cpu != -1) {
7331 + if (config.cpu < 0 || !cpu_online(config.cpu)) {
7332 + printk(KERN_ERR "invalid polling reservation (%u): "
7333 + "CPU %d offline\n", config.id, config.cpu);
7334 + return -EINVAL;
7335 + }
7336 + }
7337 +
7338 + switch (res_type) {
7339 + case PERIODIC_POLLING:
7340 + case SPORADIC_POLLING:
7341 + ret = create_polling_reservation(res_type, &config);
7342 + break;
7343 +
7344 + case TABLE_DRIVEN:
7345 + ret = create_table_driven_reservation(&config);
7346 + break;
7347 +
7348 + default:
7349 + return -EINVAL;
7350 +	}
7351 +
7352 + return ret;
7353 +}
7354 +
7355 +static struct domain_proc_info mc2_domain_proc_info;
7356 +
7357 +static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
7358 +{
7359 + *ret = &mc2_domain_proc_info;
7360 + return 0;
7361 +}
7362 +
7363 +static void mc2_setup_domain_proc(void)
7364 +{
7365 + int i, cpu;
7366 + int num_rt_cpus = num_online_cpus();
7367 +
7368 + struct cd_mapping *cpu_map, *domain_map;
7369 +
7370 +	memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info));
7371 + init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus);
7372 + mc2_domain_proc_info.num_cpus = num_rt_cpus;
7373 + mc2_domain_proc_info.num_domains = num_rt_cpus;
7374 +
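+	/* expose a 1:1 mapping between CPUs and scheduling domains */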
7375 + i = 0;
7376 + for_each_online_cpu(cpu) {
7377 + cpu_map = &mc2_domain_proc_info.cpu_to_domains[i];
7378 + domain_map = &mc2_domain_proc_info.domain_to_cpus[i];
7379 +
7380 + cpu_map->id = cpu;
7381 + domain_map->id = i;
7382 + cpumask_set_cpu(i, cpu_map->mask);
7383 + cpumask_set_cpu(cpu, domain_map->mask);
7384 + ++i;
7385 + }
7386 +}
7387 +
7388 +static long mc2_activate_plugin(void)
7389 +{
7390 + int cpu;
7391 + struct mc2_cpu_state *state;
7392 + struct cpu_entry *ce;
7393 + int i;
7394 +
7395 +	for (i = 0; i < NR_MODES; i++) {
7396 + gmp_init(&(_global_env_modes[i]));
7397 + }
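+	/* the system starts in mode 0; _global_env always points at the
+	 * environment of the currently active mode */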
7398 + _global_env = &_global_env_modes[0];
7399 +
7400 + raw_spin_lock_init(&mode_lock);
7401 + raw_spin_lock_init(&global_lock);
7402 +
7403 + seen_once = false;
7404 +
7405 + for_each_online_cpu(cpu) {
7406 + TRACE("Initializing CPU%d...\n", cpu);
7407 +
7408 + resched_cpu[cpu] = 0;
7409 +		/* this_cpu_write() would only reset the counter of the CPU
+		 * running this loop; reset each CPU's counter explicitly
+		 * (assumes mode_counter is a per-CPU variable, as implied by
+		 * the this_cpu_write() usage elsewhere) */
+		per_cpu(mode_counter, cpu) = 0;
7410 +
7411 + state = cpu_state_for(cpu);
7412 + ce = &_lowest_prio_cpu.cpu_entries[cpu];
7413 +
7414 + ce->cpu = cpu;
7415 + ce->scheduled = NULL;
7416 + ce->deadline = ULLONG_MAX;
7417 + ce->lv = NUM_CRIT_LEVELS;
7418 + ce->will_schedule = false;
7419 +
7420 + raw_spin_lock_init(&state->lock);
7421 +		printk(KERN_DEBUG "CPU%d state->lock %p\n", cpu, &state->lock);
7422 + state->cpu = cpu;
7423 + state->scheduled = NULL;
7424 +
7425 +		for (i = 0; i < NR_MODES; i++) {
7426 + sup_init(&(state->sup_env_modes[i]));
7427 + }
7428 + state->sup_env = &(state->sup_env_modes[0]);
7429 +
7430 + hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
7431 + state->timer.function = on_scheduling_timer;
7433 + }
7434 +
7435 + mc2_setup_domain_proc();
7436 +
7437 + mode_poll_exited = false;
7438 +
7439 + mode = 0;
7440 + requested_mode = 0;
7441 +
7442 +	for (i = 0; i < NR_MODES; i++) {
7443 + mode_sizes[i] = 0;
7444 + }
7445 + res_reported = 0;
7446 +
7447 + return 0;
7448 +}
7449 +
7450 +static void mc2_finish_switch(struct task_struct *prev)
7451 +{
7452 + int cpus;
7453 + enum crit_level lv = get_task_crit_level(prev);
7454 + struct mc2_cpu_state *state = local_cpu_state();
7455 +
7456 + state->scheduled = is_realtime(current) ? current : NULL;
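+	/* after a level-C (globally scheduled) task switches out, kick any
+	 * CPUs that were flagged for rescheduling in the meantime */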
7457 + if (lv == CRIT_LEVEL_C) {
7458 +		for (cpus = 0; cpus < NR_CPUS; cpus++) {
7459 + if (resched_cpu[cpus]) {
7460 + resched_cpu[cpus] = 0;
7461 + litmus_reschedule(cpus);
7462 + }
7463 + }
7464 + }
7465 +}
7466 +
7467 +static long mc2_deactivate_plugin(void)
7468 +{
7469 + int cpu;
7470 + struct mc2_cpu_state *state;
7471 + struct reservation *res;
7472 + struct next_timer_event *event;
7473 + struct cpu_entry *ce;
7474 + int i;
7475 +
7476 + for_each_online_cpu(cpu) {
7477 + state = cpu_state_for(cpu);
7478 +		/* cancel before locking: the timer callback may itself take
+		 * state->lock, so cancelling while holding it risks deadlock */
+		hrtimer_cancel(&state->timer);
+		raw_spin_lock(&state->lock);
7481 +
7482 + ce = &_lowest_prio_cpu.cpu_entries[cpu];
7483 +
7484 + ce->cpu = cpu;
7485 + ce->scheduled = NULL;
7486 + ce->deadline = ULLONG_MAX;
7487 + ce->lv = NUM_CRIT_LEVELS;
7488 + ce->will_schedule = false;
7489 +
7491 +		for (i = 0; i < NR_MODES; i++) {
7492 + /* Delete all reservations --- assumes struct reservation
7493 + * is prefix of containing struct. */
7494 + state->sup_env = &(state->sup_env_modes[i]);
7495 + while (!list_empty(&state->sup_env->active_reservations)) {
7496 + res = list_first_entry(
7497 + &state->sup_env->active_reservations,
7498 + struct reservation, list);
7499 + list_del(&res->list);
7500 + kfree(res);
7501 + }
7502 +
7503 + while (!list_empty(&state->sup_env->inactive_reservations)) {
7504 + res = list_first_entry(
7505 + &state->sup_env->inactive_reservations,
7506 + struct reservation, list);
7507 + list_del(&res->list);
7508 + kfree(res);
7509 + }
7510 +
7511 + while (!list_empty(&state->sup_env->depleted_reservations)) {
7512 + res = list_first_entry(
7513 + &state->sup_env->depleted_reservations,
7514 + struct reservation, list);
7515 + list_del(&res->list);
7516 + kfree(res);
7517 + }
7518 + }
7519 +
7520 + raw_spin_unlock(&state->lock);
7521 + }
7522 +
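+	/* tear down the global (level-C) environment of every mode */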
7523 + raw_spin_lock(&global_lock);
7524 +	for (i = 0; i < NR_MODES; i++) {
7525 + _global_env = &_global_env_modes[i];
7526 + while (!list_empty(&_global_env->active_reservations)) {
7527 + res = list_first_entry(
7528 + &_global_env->active_reservations,
7529 + struct reservation, list);
7530 + list_del(&res->list);
7531 + kfree(res);
7532 + }
7533 +
7534 + while (!list_empty(&_global_env->inactive_reservations)) {
7535 + res = list_first_entry(
7536 + &_global_env->inactive_reservations,
7537 + struct reservation, list);
7538 + list_del(&res->list);
7539 + kfree(res);
7540 + }
7541 +
7542 + while (!list_empty(&_global_env->depleted_reservations)) {
7543 + res = list_first_entry(
7544 + &_global_env->depleted_reservations,
7545 + struct reservation, list);
7546 + list_del(&res->list);
7547 + kfree(res);
7548 + }
7549 +
7550 + while (!list_empty(&_global_env->next_events)) {
7551 + event = list_first_entry(
7552 + &_global_env->next_events,
7553 + struct next_timer_event, list);
7554 + list_del(&event->list);
7555 + kfree(event);
7556 + }
7557 +
7558 + }
7559 + raw_spin_unlock(&global_lock);
7560 +
7561 + atomic_set(&num_sync_released, 0);
7562 + destroy_domain_proc_info(&mc2_domain_proc_info);
7563 + return 0;
7564 +}
7565 +
7566 +static struct sched_plugin mc2_plugin = {
7567 + .plugin_name = "MC2",
7568 + .schedule = mc2_schedule,
7569 + .finish_switch = mc2_finish_switch,
7570 + .task_wake_up = mc2_task_resume,
7571 + .admit_task = mc2_admit_task,
7572 + .task_new = mc2_task_new,
7573 + .task_exit = mc2_task_exit,
7574 + .complete_job = mc2_complete_job,
7575 + .get_domain_proc_info = mc2_get_domain_proc_info,
7576 + .activate_plugin = mc2_activate_plugin,
7577 + .deactivate_plugin = mc2_deactivate_plugin,
7578 + .reservation_create = mc2_reservation_create,
7579 + .reservation_destroy = mc2_reservation_destroy,
7580 +};
7581 +
7582 +static int __init init_mc2(void)
7583 +{
7584 + return register_sched_plugin(&mc2_plugin);
7585 +}
7586 +
7587 +module_init(init_mc2);
7588 diff --git litmus/sched_plugin.c litmus/sched_plugin.c
7589 index edd91e9..7b1eba0 100644
7590 --- litmus/sched_plugin.c
7591 +++ litmus/sched_plugin.c
7592 @@ -13,6 +13,7 @@
7593 #include <litmus/sched_plugin.h>
7594 #include <litmus/preempt.h>
7595 #include <litmus/jobs.h>
7596 +#include <litmus/budget.h>
7597
7598 /*
7599 * Generic function to trigger preemption on either local or remote cpu
7600 @@ -197,6 +198,9 @@ int register_sched_plugin(struct sched_plugin* plugin)
7601 if (!plugin->wait_for_release_at)
7602 plugin->wait_for_release_at = default_wait_for_release_at;
7603
7604 + if (!plugin->current_budget)
7605 + plugin->current_budget = litmus_current_budget;
7606 +
7607 raw_spin_lock(&sched_plugins_lock);
7608 list_add(&plugin->list, &sched_plugins);
7609 raw_spin_unlock(&sched_plugins_lock);
7610 diff --git litmus/sched_psn_edf.c litmus/sched_psn_edf.c
7611 index 2549a3f..76a57af 100644
7612 --- litmus/sched_psn_edf.c
7613 +++ litmus/sched_psn_edf.c
7614 @@ -243,7 +243,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
7615 if (next) {
7616 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
7617 } else {
7618 - TRACE("becoming idle at %llu\n", litmus_clock());
7619 + ; //TRACE("becoming idle at %llu\n", litmus_clock());
7620 }
7621
7622 pedf->scheduled = next;
7623 @@ -644,10 +644,14 @@ static long psnedf_admit_task(struct task_struct* tsk)
7624 /* don't allow tasks on release master CPU */
7625 && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master
7626 #endif
7627 - )
7628 + ) {
7629 + TRACE_TASK(tsk, "admitted\n");
7630 return 0;
7631 - else
7632 + }
7633 + else {
7634 + TRACE_TASK(tsk, "not admitted\n");
7635 return -EINVAL;
7636 + }
7637 }
7638
7639 /* Plugin object */
7640 diff --git litmus/sched_task_trace.c litmus/sched_task_trace.c
7641 index c154ec4..d844180 100644
7642 --- litmus/sched_task_trace.c
7643 +++ litmus/sched_task_trace.c
7644 @@ -189,6 +189,7 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
7645 if (rec) {
7646 rec->data.completion.when = now();
7647 rec->data.completion.forced = forced;
7648 + rec->data.completion.exec_time = get_exec_time(t);
7649 put_record(rec);
7650 }
7651 }
7652 @@ -240,3 +241,39 @@ feather_callback void do_sched_trace_action(unsigned long id,
7653 put_record(rec);
7654 }
7655 }
7656 +
7657 +feather_callback void do_sched_trace_enact_mode(unsigned long id,
7658 + unsigned long _task)
7659 +{
7660 + struct task_struct *t = (struct task_struct*) _task;
7661 + struct st_event_record* rec = get_record(ST_ENACT_MODE, t);
7662 +
7663 + if (rec) {
7664 + rec->data.enact_mode.when = now();
7665 + put_record(rec);
7666 + }
7667 +}
7668 +
7669 +feather_callback void do_sched_trace_request_mode(unsigned long id,
7670 + unsigned long _task)
7671 +{
7672 + struct task_struct *t = (struct task_struct*) _task;
7673 + struct st_event_record* rec = get_record(ST_REQUEST_MODE, t);
7674 +
7675 + if (rec) {
7676 + rec->data.request_mode.when = now();
7677 + put_record(rec);
7678 + }
7679 +}
7680 +
7681 +feather_callback void do_sched_trace_sys_start(unsigned long id,
7682 + unsigned long _start)
7683 +{
7684 + lt_t *start = (lt_t*) _start;
7685 + struct st_event_record* rec = get_record(ST_SYS_START, NULL);
7686 + if (rec) {
7687 + rec->data.sys_start.when = now();
7688 + rec->data.sys_start.start = *start;
7689 + put_record(rec);
7690 + }
7691 +}
7692 \ No newline at end of file
7693 diff --git litmus/sync.c litmus/sync.c
7694 index 5d18060..7733f67 100644
7695 --- litmus/sync.c
7696 +++ litmus/sync.c
7697 @@ -16,6 +16,8 @@
7698
7699 #include <litmus/sched_trace.h>
7700
7701 +atomic_t num_sync_released;
7702 +
7703 struct ts_release_wait {
7704 struct list_head list;
7705 struct completion completion;
7706 @@ -147,6 +149,6 @@ asmlinkage long sys_release_ts(lt_t __user *__delay)
7707 start_time *= ONE_MS;
7708 ret = do_release_ts(start_time + delay);
7709 }
7710 -
7711 + atomic_set(&num_sync_released, ret);
7712 return ret;
7713 }