Attachment 'gedf-mp-rtas14.patch'
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 3a3c2f1..66af7c5 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -14,6 +14,7 @@ BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR)
+BUILD_INTERRUPT(litmus_mailbox_interrupt,LITMUS_MAILBOX_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 #endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 672de93..bef3c8d 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -79,6 +79,8 @@ extern void call_function_single_interrupt(void);

 extern void pull_timers_interrupt(void);

+extern void litmus_mailbox_interrupt(void);
+
 /* IOAPIC */
 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
@@ -169,6 +171,7 @@ extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
 extern void smp_pull_timers_interrupt(struct pt_regs *);
+extern void smp_litmus_mailbox_interrupt(struct pt_regs *);
 #ifdef CONFIG_X86_32
 extern void smp_invalidate_interrupt(struct pt_regs *);
 #else
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 224116b..5f3f422 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -130,6 +130,8 @@
  */
 #define PULL_TIMERS_VECTOR 0xdf

+#define LITMUS_MAILBOX_VECTOR 0xde
+
 #define NR_VECTORS 256

 #define FPU_IRQ 13
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a48b3ea..9d9e8bf 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1185,6 +1185,8 @@ apicinterrupt RESCHEDULE_VECTOR \
 	reschedule_interrupt smp_reschedule_interrupt
 apicinterrupt PULL_TIMERS_VECTOR \
 	pull_timers_interrupt smp_pull_timers_interrupt
+apicinterrupt LITMUS_MAILBOX_VECTOR \
+	litmus_mailbox_interrupt smp_litmus_mailbox_interrupt
 #endif

 apicinterrupt ERROR_APIC_VECTOR \
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 77979d9..2eaacdc 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -148,6 +148,9 @@ static void __init smp_intr_init(void)
 	/* IPI for hrtimer pulling on remote cpus */
 	alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt);

+	/* IPI for LITMUS^RT's dedicated scheduler mailbox */
+	alloc_intr_gate(LITMUS_MAILBOX_VECTOR, litmus_mailbox_interrupt);
+
 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index becf5c3..b7bd9fd 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -26,6 +26,7 @@

 #include <litmus/preempt.h>
 #include <litmus/debug_trace.h>
+#include <litmus/mailbox.h>

 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
@@ -176,6 +177,15 @@ void smp_send_pull_timers(int cpu)
 	apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR);
 }

+void smp_send_mailbox(int cpu)
+{
+	if (unlikely(cpu_is_offline(cpu))) {
+		WARN_ON(1);
+		return;
+	}
+	apic->send_IPI_mask(cpumask_of(cpu), LITMUS_MAILBOX_VECTOR);
+}
+
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
@@ -314,6 +324,15 @@ void smp_pull_timers_interrupt(struct pt_regs *regs)
 	irq_exit();
 }

+void smp_litmus_mailbox_interrupt(struct pt_regs *regs)
+{
+	irq_enter();
+	mailbox_arrived();
+	ack_APIC_irq();
+	inc_irq_stat(irq_call_count);
+	irq_exit();
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = native_smp_prepare_cpus,
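
Taken together, the arch/x86 hunks above form the delivery path for scheduler messages: smp_send_mailbox() raises LITMUS_MAILBOX_VECTOR on the target core, the stub generated in entry_64.S dispatches to smp_litmus_mailbox_interrupt(), and that handler drains the pending messages via mailbox_arrived(), which is defined in litmus/mailbox.c below.
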
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 4f78ea7..3426260 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -88,6 +88,8 @@ void kick_all_cpus_sync(void);
  */
 extern void smp_send_pull_timers(int cpu);

+extern void smp_send_mailbox(int cpu);
+
 /*
  * Generic and arch helpers
  */
diff --git a/include/litmus/mailbox.h b/include/litmus/mailbox.h
new file mode 100644
index 0000000..2ae69f5
--- /dev/null
+++ b/include/litmus/mailbox.h
@@ -0,0 +1,24 @@
+#ifndef LITMUS_MAILBOX_H
+#define LITMUS_MAILBOX_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+#include <linux/spinlock.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/feather_buffer.h>
+
+typedef void (*mailbox_callback_f)(unsigned int sender_id, void *arg);
+
+#define _NUM_MAILBOXES 8
+#define NUM_MAILBOXES (_NUM_MAILBOXES > NR_CPUS ? NR_CPUS : _NUM_MAILBOXES)
+
+void init_mailbox_buffer(void);
+void add_mailbox_call(mailbox_callback_f callback, unsigned int sender_id, void *arg);
+void mailbox_arrived(void);
+
+void mailbox_broadcast(mailbox_callback_f callback, unsigned int sender_id, void *arg);
+
+#endif
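
The interface above is intentionally small. As a minimal usage sketch (illustrative only, not part of the patch; my_handler and master_cpu are hypothetical names), a client core enqueues a callback and then kicks the receiving core:

	static void my_handler(unsigned int sender_id, void *arg)
	{
		/* invoked on the receiving core, inside the mailbox ISR */
	}

	static void notify(int master_cpu, void *payload)
	{
		/* enqueue the request, then raise the mailbox IPI */
		add_mailbox_call(my_handler, smp_processor_id(), payload);
		smp_send_mailbox(master_cpu);	/* ends up in mailbox_arrived() */
	}
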
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index e26535b..282d4fd 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -251,6 +251,15 @@ struct rt_param {
 	/* PFAIR/PD^2 state. Allocated on demand. */
 	struct pfair_param* pfair;

+	/* Per-task release timer */
+	struct hrtimer release_timer;
+
+	/* Dedicated scheduler */
+	volatile int safe_to_exit;
+	int job_completed;
+	int job_suspended;
+	int job_exited;
+
 	/* Fields saved before BE->RT transition.
 	 */
 	int old_policy;
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 6367355..48c9756 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -28,14 +28,23 @@ struct timestamp {
 };

 /* tracing callbacks */
-feather_callback void msg_sent(unsigned long event, unsigned long to);
-feather_callback void msg_received(unsigned long event);
+feather_callback void msg_sent_to(unsigned long event, unsigned long to);
+feather_callback void msg_received_local(unsigned long event);
+
+feather_callback void msg_sent_local(unsigned long event);
+feather_callback void msg_received_from(unsigned long event, unsigned long from);

 #define MSG_TIMESTAMP_SENT(id, to) \
-	ft_event1(id, msg_sent, (unsigned long) to);
+	ft_event1(id, msg_sent_to, (unsigned long) (to));

 #define MSG_TIMESTAMP_RECEIVED(id) \
-	ft_event0(id, msg_received);
+	ft_event0(id, msg_received_local);
+
+#define MSG_TIMESTAMP_SENT_LOCAL(id) \
+	ft_event0(id, msg_sent_local);
+
+#define MSG_TIMESTAMP_RECEIVED_FROM(id, from) \
+	ft_event1(id, msg_received_from, (unsigned long) (from))

 feather_callback void save_cpu_timestamp(unsigned long event);
 feather_callback void save_cpu_timestamp_time(unsigned long event, unsigned long time_ptr);
@@ -134,6 +143,12 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
 #define TS_SEND_RESCHED_START(c) MSG_TIMESTAMP_SENT(190, c)
 #define TS_SEND_RESCHED_END MSG_TIMESTAMP_RECEIVED(191)

+#define TS_CLIENT_REQUEST_LATENCY_START MSG_TIMESTAMP_SENT_LOCAL(160)
+#define TS_CLIENT_REQUEST_LATENCY_END(msg_from) MSG_TIMESTAMP_RECEIVED_FROM(161, msg_from)
+
+#define TS_DSP_HANDLER_START CPU_TIMESTAMP_CUR(162)
+#define TS_DSP_HANDLER_END CPU_TIMESTAMP_CUR(163)
+
 #define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when))

 #endif /* !_SYS_TRACE_H_ */
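
The new timestamp pairs are designed to be matched up offline: TS_CLIENT_REQUEST_LATENCY_START is taken on the client CPU just before the mailbox IPI is sent (see send_to_master() in litmus/sched_gedf_mp.c below), and TS_CLIENT_REQUEST_LATENCY_END(sender) is taken when the message is dequeued in mailbox_arrived(); since msg_sent_local records into the local buffer and msg_received_from records into the sender's buffer, both halves of a pair end up attributed to the same CPU. TS_DSP_HANDLER_START/END bracket each message handler on the dedicated core ("DSP" presumably stands for dedicated scheduling processor).
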
diff --git a/litmus/Makefile b/litmus/Makefile
index 2bddc94..d0f922e 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,7 +21,10 @@ obj-y = sched_plugin.o litmus.o \
 	uncachedev.o \
 	sched_gsn_edf.o \
 	sched_psn_edf.o \
-	sched_pfp.o
+	sched_pfp.o \
+	mailbox.o
+
+obj-$(CONFIG_RELEASE_MASTER) += sched_gedf_mp.o

 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 71ef06e..1fd3ab3 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -20,6 +20,7 @@
 #include <litmus/rt_domain.h>
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
+#include <litmus/mailbox.h>

 #ifdef CONFIG_SCHED_CPU_AFFINITY
 #include <litmus/affinity.h>
@@ -607,6 +608,8 @@ static int __init _init_litmus(void)
 	init_topology();
 #endif

+	init_mailbox_buffer();
+
 	register_reboot_notifier(&shutdown_notifier);

 	return 0;
diff --git a/litmus/mailbox.c b/litmus/mailbox.c
new file mode 100644
index 0000000..1267271
--- /dev/null
+++ b/litmus/mailbox.c
@@ -0,0 +1,132 @@
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+
+#include <litmus/trace.h>
+#include <litmus/mailbox.h>
+
+typedef struct {
+	mailbox_callback_f callback;
+	unsigned int sender_id; /* cpu id of the sender (IPI) */
+	void *arg;
+} __attribute__((aligned(64))) mailbox_data_t;
+
+#define MAILBOX_BUFSIZE (2 << 19)
+
+#define _CEIL(x, y) (((x) / (y)) + (((x) % (y)) != 0))
+
+#define CPUS_PER_MAILBOX _CEIL(NR_CPUS, NUM_MAILBOXES)
+
+
+static struct ft_buffer *mb_buf[NUM_MAILBOXES];
+
+void init_mailbox_buffer(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_MAILBOXES; i++) {
+		mb_buf[i] = alloc_ft_buffer(MAILBOX_BUFSIZE, sizeof(mailbox_data_t));
+		BUG_ON(!mb_buf[i]);
+	}
+}
+
+static inline struct ft_buffer *choose_mailbox(unsigned int sender)
+{
+	/* floor => neighboring CPUs use the same mailbox => biased towards
+	 * on-socket sharing (assuming physical neighbors are numbered
+	 * consecutively; inspecting the actual topology could be added as an option)
+	 */
+	return mb_buf[sender / CPUS_PER_MAILBOX];
+}
+
+static void __add_mailbox_call(
+	struct ft_buffer *buf,
+	mailbox_callback_f callback,
+	unsigned int sender_id,
+	void *arg)
+{
+	mailbox_data_t *data;
+
+	if (unlikely(!ft_buffer_start_write(buf, (void **) &data)))
+		BUG(); /* prototype: mailbox delivery may not fail */
+	data->callback = callback;
+	data->arg = arg;
+	data->sender_id = sender_id;
+	ft_buffer_finish_write(buf, data);
+}
+
+void add_mailbox_call(mailbox_callback_f callback, unsigned int sender_id, void *arg)
+{
+	struct ft_buffer *buf = choose_mailbox(sender_id);
+	__add_mailbox_call(buf, callback, sender_id, arg);
+}
+
+
+void mailbox_broadcast(mailbox_callback_f callback, unsigned int sender_id, void *arg)
+{
+	int i;
+
+	for (i = 0; i < NUM_MAILBOXES; i++)
+		__add_mailbox_call(mb_buf[i], callback, sender_id, arg);
+}
+
+static volatile int already_in_mailbox_isr = 0;
+
+void mailbox_arrived(void)
+{
+	int i, loop = 1;
+	mailbox_data_t data;
+	unsigned long flags;
+
+
+	/* If we are taking a nested interrupt, quit immediately and
+	 * let the outer IRQ handler finish the queue processing.
+	 */
+	if (already_in_mailbox_isr)
+		return;
+	else
+		already_in_mailbox_isr = 1;
+
+	local_irq_save(flags);
+
+	while (loop) {
+		loop = 0;
+
+		/* Loop over mailboxes, picking only one message from each
+		 * queue at a time to avoid starvation of higher-indexed
+		 * queues.
+		 */
+		for (i = 0; i < NUM_MAILBOXES; i++)
+			if (ft_buffer_read(mb_buf[i], &data)) {
+				TS_CLIENT_REQUEST_LATENCY_END(data.sender_id);
+				(data.callback)(data.sender_id, data.arg);
+
+				/* Turn on interrupts briefly to
+				 * avoid long irq-off section.
+				 *
+				 * Lockdep will warn that turning on IRQs in
+				 * hard IRQ context is bad. The alternatives here
+				 * would be to move this loop into a kthread,
+				 * which adds context switch overhead on the
+				 * critical path, or to simply hog the core
+				 * running this loop constantly.
+				 *
+				 * Limited stack depth is only a problem if
+				 * we permit arbitrary stack growth. However,
+				 * the already_in_mailbox_isr check ensures
+				 * that we nest ISRs at most one level deep,
+				 * which should not blow the stack.
+				 * So we simply ignore the Linux warning and
+				 * accept limited ISR recursion at this point.
+				 */
+				local_irq_enable();
+
+				BUG_ON(irqs_disabled());
+
+				local_irq_disable();
+				loop = 1;
+			}
+	}
+
+	already_in_mailbox_isr = 0;
+	local_irq_restore(flags);
+}
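
To make the mailbox assignment concrete: with _NUM_MAILBOXES = 8 and a kernel configured with, say, NR_CPUS = 64 (an assumed configuration), CPUS_PER_MAILBOX = _CEIL(64, 8) = 8, so senders 0-7 share mailbox 0, senders 8-15 share mailbox 1, and so on; with NR_CPUS = 4, NUM_MAILBOXES collapses to 4 and every sender gets a private queue.
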
diff --git a/litmus/sched_gedf_mp.c b/litmus/sched_gedf_mp.c
new file mode 100644
index 0000000..c380178
--- /dev/null
+++ b/litmus/sched_gedf_mp.c
@@ -0,0 +1,1059 @@
+/* G-EDF with message passing
+ */
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_common.h>
+#include <litmus/sched_trace.h>
+#include <litmus/trace.h>
+
+#include <litmus/preempt.h>
+#include <litmus/budget.h>
+
+#include <litmus/bheap.h>
+
+#include <litmus/mailbox.h>
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
+/* to set up domain/cpu mappings */
+#include <litmus/litmus_proc.h>
+
+#include <linux/module.h>
+
+/* cpu_entry_t - maintain the linked and scheduled state
+ */
+typedef struct {
+	int cpu;
+	struct task_struct* linked; /* only RT tasks */
+	struct task_struct* scheduled; /* only RT tasks */
+	raw_spinlock_t lock;
+} cpu_entry_t;
+DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, gedf_cpu_entries);
+
+cpu_entry_t* gedf_cpus[NR_CPUS];
+
+static rt_domain_t gedf;
+
+/* master state */
+struct {
+	int cpu_online[NR_CPUS];
+	int cpu_idle[NR_CPUS];
+	pid_t linked_pid[NR_CPUS];
+	lt_t linked_prio[NR_CPUS];
+	int link_idx;
+	int min_cached;
+} master_state;
+
+
+/* Uncomment this if you want to see all scheduling decisions in the
+ * TRACE() log.
+#define WANT_ALL_SCHED_EVENTS
+ */
+
+
+static inline cpu_entry_t *cpu_state(int cpu)
+{
+	cpu_entry_t *entry;
+	BUG_ON(cpu < 0 || cpu >= (int) NR_CPUS || cpu == gedf.release_master);
+	entry = gedf_cpus[cpu];
+	return entry;
+}
+
+/* preempt - force a CPU to reschedule
+ */
+static void preempt(cpu_entry_t *entry)
+{
+	preempt_if_preemptable(entry->scheduled, entry->cpu);
+}
+
+/* **************** master state management ************** */
+
+/* update_cpu_position - update master's snapshot of CPU state
+ */
+static void update_cpu_position(cpu_entry_t *entry)
+{
+	if (entry->linked) {
+		master_state.cpu_idle[entry->cpu] = 0;
+		master_state.linked_pid[entry->cpu] = entry->linked->pid;
+		master_state.linked_prio[entry->cpu] = get_deadline(entry->linked);
+		TRACE("new pos P%d ldl:%llu lpid:%d\n",
+		      entry->cpu,
+		      master_state.linked_prio[entry->cpu],
+		      master_state.linked_pid[entry->cpu]);
+	} else {
+		master_state.cpu_idle[entry->cpu] = 1;
+		TRACE("new pos P%d now idle\n", entry->cpu);
+		/* speed up searches for idle CPUs */
+		master_state.link_idx = entry->cpu;
+	}
+	master_state.min_cached = 0;
+}
+
+static int gedf_cpu_valid(int cpu)
+{
+	return master_state.cpu_online[cpu];
+}
+
+static int gedf_cpu_idle(int cpu)
+{
+	return master_state.cpu_online[cpu] && master_state.cpu_idle[cpu];
+}
+
+static int cpu_lower_prio(int a, int b)
+{
+	/* check for later deadline */
+	if (lt_after(master_state.linked_prio[a], master_state.linked_prio[b]))
+		return 1;
+	/* break by PID */
+	else if (master_state.linked_prio[a] == master_state.linked_prio[b])
+		return master_state.linked_pid[a] > master_state.linked_pid[b];
+	else
+		return 0;
+}
+
+static int gedf_preempt(struct task_struct* task, int cpu)
+{
+	TRACE_TASK(task, "preempt-check dl:%llu idle:%d ldl:%llu lpid:%d\n",
+		   get_deadline(task), gedf_cpu_idle(cpu),
+		   master_state.linked_prio[cpu], master_state.linked_pid[cpu]);
+	if (gedf_cpu_idle(cpu))
+		return 1;
+	else if (lt_before(get_deadline(task), master_state.linked_prio[cpu]))
+		return 1;
+	else if (get_deadline(task) == master_state.linked_prio[cpu] &&
+		 task->pid < master_state.linked_pid[cpu])
+		return 1;
+	else
+		return 0;
+}
+
+static int find_lowest_prio_or_idle_cpu(void)
+{
+	int start, pos;
+	int min_idx;
+
+	start = pos = master_state.link_idx;
+
+	/* if the position is still valid, just reuse it */
+	if (master_state.min_cached)
+		return pos;
+
+	while (!gedf_cpu_valid(pos))
+		pos = (pos + 1) % NR_CPUS;
+
+	if (gedf_cpu_idle(pos)) {
+		master_state.link_idx = pos;
+		return pos;
+	} else {
+		min_idx = pos;
+		pos = (pos + 1) % NR_CPUS;
+	}
+
+	TRACE(">>> pre-min search start:%d pos:%d min:%d\n", start, pos, min_idx);
+
+	for (; pos != start; pos = (pos + 1) % NR_CPUS) {
+		if (gedf_cpu_idle(pos)) {
+			min_idx = pos;
+			break;
+		} else if (gedf_cpu_valid(pos) &&
+			   cpu_lower_prio(pos, min_idx))
+			min_idx = pos;
+		TRACE(">>> min search start:%d pos:%d min:%d\n", start, pos, min_idx);
+	}
+
+	TRACE(">>> post-min search start:%d pos:%d min:%d\n", start, pos, min_idx);
+
+	master_state.link_idx = min_idx;
+	master_state.min_cached = 1;
+	return min_idx;
+}
+
+
+
+/* **************** helper functions ************** */
+
+static cpu_entry_t *locked_cpu_state(int cpu)
+{
+	cpu_entry_t *state = cpu_state(cpu);
+	raw_spin_lock(&state->lock);
+	return state;
+}
+
+static void unlock_cpu_state(cpu_entry_t *state)
+{
+	raw_spin_unlock(&state->lock);
+}
+
+/* assumes interrupts off */
+static cpu_entry_t *lock_scheduled_on(struct task_struct *task)
+{
+	int cpu;
+	cpu_entry_t *sched_on;
+
+	while (1) {
+		cpu = tsk_rt(task)->scheduled_on;
+
+		if (cpu != NO_CPU) {
+			sched_on = locked_cpu_state(cpu);
+			/* check if the task is scheduled */
+			if (tsk_rt(task)->scheduled_on == cpu)
+				/* yes, return locked */
+				return sched_on;
+			else
+				/* no, moved, try again */
+				unlock_cpu_state(sched_on);
+		} else
+			return NULL;
+	}
+}
+
+
+/* assumes interrupts off */
+static cpu_entry_t *lock_linked_on(struct task_struct *task)
+{
+	int cpu;
+	cpu_entry_t *linked_on;
+
+	while (1) {
+		cpu = tsk_rt(task)->linked_on;
+
+		if (cpu != NO_CPU) {
+			linked_on = locked_cpu_state(cpu);
+			/* check if the task is linked */
+			if (tsk_rt(task)->linked_on == cpu) {
+				BUG_ON(linked_on->linked != task);
+				/* yes, return locked */
+				return linked_on;
+			} else
+				/* no, moved, try again */
+				unlock_cpu_state(linked_on);
+		} else
+			return NULL;
+	}
+}
+
+
+
+/* **************** main scheduling functions ************** */
+
+static int task_is_stale(struct task_struct *t)
+{
+	if (unlikely(tsk_rt(t)->job_completed)) {
+		TRACE_TASK(t, "stale; completed\n");
+		return 1;
+	} else if (unlikely(tsk_rt(t)->job_suspended)) {
+		TRACE_TASK(t, "stale; suspended\n");
+		return 1;
+	} else if (unlikely(tsk_rt(t)->job_exited)) {
664 + TRACE_TASK(t, "stale, exited\n");
+		return 1;
+	} else
+		return 0;
+}
+
+static void queue_task(struct task_struct *task)
+{
+	/* sanity check before insertion */
+	BUG_ON(!task);
+	BUG_ON(tsk_rt(task)->linked_on != NO_CPU);
+
+	if (task_is_stale(task)) {
+		/* no point in adding this task anywhere if it is already stale */
678 + TRACE_TASK(task, "not queueing task b/c it is stale\n");
679 + return;
680 + }
681 +
682 + BUG_ON(is_queued(task));
683 +
684 + if (is_early_releasing(task) || is_released(task, litmus_clock())) {
685 + TRACE_TASK(task, "queue_task::add_ready\n");
686 + __add_ready(&gedf, task);
687 + } else {
688 + TRACE_TASK(task, "queue_task::add_release\n");
689 + /* it has got to wait */
690 + __add_release(&gedf, task);
691 + }
692 +}
693 +
694 +
695 +static struct task_struct * dequeue_task(void)
696 +{
697 + struct task_struct *t = NULL;
698 +
699 + /* Filter all tasks that got put in the queue
700 + * just before they became unavailable for execution. */
701 + do {
702 + t = __take_ready(&gedf);
703 + /* The flags might not be stable yet because t could
704 + * still be executing, but we filter what we can
705 + * get at this point. */
706 + } while (t && task_is_stale(t));
707 +
708 + return t;
709 +}
710 +
711 +static struct task_struct *maybe_swap(struct task_struct *t, int lowest_cpu)
712 +{
713 + struct task_struct *tmp;
714 + cpu_entry_t *entry;
715 +
716 + entry = lock_scheduled_on(t);
717 + if (entry) {
718 + /* still scheduled, should swap */
719 + TRACE_TASK(t, "swapped to P%d instead of P%d \n", entry->cpu, lowest_cpu);
720 +
721 + tmp = entry->linked;
722 + TRACE_TASK(tmp, "got swapped out\n");
723 + if (tmp)
724 + tsk_rt(tmp)->linked_on = NO_CPU;
725 +
726 + entry->linked = t;
727 + tsk_rt(t)->linked_on = entry->cpu;
728 +
729 + update_cpu_position(entry);
730 + unlock_cpu_state(entry);
731 + preempt(entry);
732 +
733 + if (entry->cpu == lowest_cpu) {
734 + /* Corner case: no true swap, we wanted to
735 + * link here anyway. */
736 + if (tmp)
737 + queue_task(tmp);
738 + tmp = NULL;
739 + }
740 +
741 + return tmp;
742 + } else if (task_is_stale(t))
743 + /* no longer scheduled => flags are stable, check again */
744 + return NULL;
745 + else
746 + /* not scheduled, still valid, ok let's go! */
747 + return t;
748 +}
749 +
750 +static void check_for_preemptions(void)
751 +{
752 + struct task_struct *t, *preempted;
753 + int cpu;
754 + cpu_entry_t *entry;
755 +
756 + while (1) {
757 + t = dequeue_task();
758 +
759 + TRACE_TASK(t, "considered for scheduling\n");
760 +
761 + if (!t) {
762 + TRACE("EMPTY\n");
763 + break;
764 + }
765 +
766 + BUG_ON(tsk_rt(t)->linked_on != NO_CPU);
767 +
768 + cpu = find_lowest_prio_or_idle_cpu();
769 +
770 + if (gedf_preempt(t, cpu)) {
771 + t = maybe_swap(t, cpu);
772 + if (t) {
773 + entry = locked_cpu_state(cpu);
774 +
775 + preempted = entry->linked;
776 + if (preempted)
777 + tsk_rt(preempted)->linked_on = NO_CPU;
778 + entry->linked = t;
779 + tsk_rt(t)->linked_on = cpu;
780 +
+				/* Check for a race: the task might have _just_
+				 * become stale. After we set linked_on, we need
+				 * to check again for staleness. The task-exit
+				 * code does the reverse: it first sets job_exited
+				 * and then checks linked_on. */
+				smp_wmb();
+				if (task_is_stale(entry->linked)) {
+					TRACE_TASK(entry->linked, "became stale after linking\n");
+					/* undo preemption */
+					tsk_rt(entry->linked)->linked_on = NO_CPU;
+					entry->linked = NULL;
+				} else
+					TRACE_TASK(t, "linked to P%d\n", entry->cpu);
+
+				update_cpu_position(entry);
+
+				unlock_cpu_state(entry);
+				preempt(entry);
+
+				TRACE_TASK(preempted, "preempted from P%d\n", entry->cpu);
+
+				if (preempted)
+					queue_task(preempted);
+			}
+		} else {
+			/* insufficient priority to preempt */
+			queue_task(t);
+			break;
+		}
+	}
+}
+
+static void update_cpu_position_unlocked(int cpu)
+{
+	cpu_entry_t *entry = locked_cpu_state(cpu);
+	update_cpu_position(entry);
+	unlock_cpu_state(entry);
+}
+
+/* **************** message passing interface ************** */
+
+
+static void send_to_master(mailbox_callback_f fn, struct task_struct *t)
+{
+	int cpu = smp_processor_id();
+
+	if (cpu == gedf.release_master)
+		fn(cpu, t);
+	else {
+		add_mailbox_call(fn, cpu, t);
+		TS_CLIENT_REQUEST_LATENCY_START;
+		smp_send_mailbox(gedf.release_master);
+	}
+}
+
+static void on_task_new(unsigned int sender_id, void *arg)
+{
+	unsigned long flags;
+	struct task_struct *t = (struct task_struct *) arg;
+
+	local_irq_save(flags);
+	TS_DSP_HANDLER_START;
+
+	TRACE_TASK(t, "%s from P%d \n", __FUNCTION__, sender_id);
+
+	queue_task(t);
+	check_for_preemptions();
+
+	TS_DSP_HANDLER_END;
+	local_irq_restore(flags);
+}
+
+static void on_queue_flushed(unsigned int sender_id, void *arg)
+{
+	unsigned long flags;
+	struct task_struct *t = (struct task_struct *) arg;
+
+	local_irq_save(flags);
+	TS_DSP_HANDLER_START;
+
+	TRACE_TASK(t, "%s from %d \n", __FUNCTION__, sender_id);
+
+	mb();
+	tsk_rt(t)->safe_to_exit += 1;
+
+	TS_DSP_HANDLER_END;
+	local_irq_restore(flags);
+}
+
+#define JOB_EXIT_OFFSET 2
+
+static void on_exit(unsigned int sender_id, void *arg)
+{
+	unsigned long flags;
+	struct task_struct *t = (struct task_struct *) arg;
+	int was_linked_on;
+
+	local_irq_save(flags);
+	TS_DSP_HANDLER_START;
+
+	was_linked_on = tsk_rt(t)->job_exited - JOB_EXIT_OFFSET;
+
+	TRACE_TASK(t, "%s from %d, was_linked_on:%d\n", __FUNCTION__,
+		   sender_id, was_linked_on);
+
+	if (was_linked_on != NO_CPU)
+		update_cpu_position_unlocked(was_linked_on);
+
+	if (is_queued(t)) {
+		TRACE_TASK(t, "is_queued()\n");
+		/* FIXME: how to determine whether the task is in a release
+		 * heap? If the task happens to be in a release heap, this
+		 * will crash. As a temporary workaround, this should work as
+		 * long as tasks exit themselves (like rtspin does).
+		 */
+
+		/* remove from ready queue */
+		remove(&gedf, t);
+	}
+
+	/* If any messages referring to this task are still in flight, then we
+	 * will get in trouble once they arrive. To work around this problem,
+	 * we send ourselves a message that, once it is received, implies that
+	 * all messages involving this task have been processed.
+	 *
+	 * To this end, we add on_queue_flushed to the end of all
+	 * mailboxes. Since mailboxes work in FIFO order, this should flush all
+	 * pending messages.
+	 */
+	mailbox_broadcast(on_queue_flushed, NR_CPUS, t);
+
+	check_for_preemptions();
+
+	TS_DSP_HANDLER_END;
+	local_irq_restore(flags);
+}
+
+static void on_resume(unsigned int sender_id, void *arg)
+{
+	unsigned long flags;
+	struct task_struct *t = (struct task_struct *) arg;
+	cpu_entry_t* cpu_state;
+	lt_t now;
+
+	local_irq_save(flags);
+	TS_DSP_HANDLER_START;
+
+	TRACE_TASK(t, "%s from P%d exited:%d\n", __FUNCTION__, sender_id,
+		   tsk_rt(t)->job_exited);
+
+	if (unlikely(tsk_rt(t)->job_exited))
+		goto out;
+
+	/* It better be marked as suspended. */
+	BUG_ON(!tsk_rt(t)->job_suspended);
+
+	/* Let's make sure this task isn't currently
+	 * being processed as completed. */
+	cpu_state = lock_scheduled_on(t);
+	/* If cpu_state == NULL, then t is no longer scheduled
+	 * and we can go ahead and just do the update
+	 */
+	now = litmus_clock();
+	if (!tsk_rt(t)->completed && is_sporadic(t) && is_tardy(t, now)) {
+		/* new sporadic release */
+		release_at(t, now);
+		sched_trace_task_release(t);
+	}
+	/* can be scheduled again */
+	tsk_rt(t)->job_suspended = 0;
+	if (cpu_state)
+		unlock_cpu_state(cpu_state);
+
+	queue_task(t);
+	check_for_preemptions();
+
+out:
+	TS_DSP_HANDLER_END;
+	local_irq_restore(flags);
+}
+
+static void on_job_completion(unsigned int sender_id, void *arg)
+{
+	unsigned long flags;
+	struct task_struct *t = (struct task_struct *) arg;
+
+	local_irq_save(flags);
+	TS_DSP_HANDLER_START;
+
+	TRACE_TASK(t, "%s from P%d exited:%d\n", __FUNCTION__, sender_id,
+		   tsk_rt(t)->job_exited);
+
+	if (unlikely(tsk_rt(t)->job_exited))
+		goto out;
+
+	/* It better be marked as completed! */
+	BUG_ON(!tsk_rt(t)->job_completed);
+
+	/* Cannot be linked anymore! */
+	BUG_ON(lock_linked_on(t) != NULL);
+
+	/* Could have been added to ready queue in the mean time. */
+	if (is_queued(t))
+		remove(&gedf, t);
+
+	/* Clear the flag used to detect stale tasks in the ready queue. */
+	tsk_rt(t)->job_completed = 0;
+	/* Clear the flag used to communicate job completions to the scheduler. */
+	tsk_rt(t)->completed = 0;
+	prepare_for_next_period(t);
+	if (is_early_releasing(t) || is_released(t, litmus_clock()))
+		sched_trace_task_release(t);
+
+	queue_task(t);
+
+out:
+	update_cpu_position_unlocked(sender_id);
+	check_for_preemptions();
+
+
+	TS_DSP_HANDLER_END;
+	local_irq_restore(flags);
+}
+
+static void on_job_suspension(unsigned int sender_id, void *arg)
+{
+	unsigned long flags;
+	struct task_struct *t = (struct task_struct *) arg;
+
+	local_irq_save(flags);
+	TS_DSP_HANDLER_START;
+
+	TRACE_TASK(t, "%s from P%d exited:%d\n", __FUNCTION__, sender_id,
+		   tsk_rt(t)->job_exited);
+
+	if (unlikely(tsk_rt(t)->job_exited))
+		goto out;
+
+	/* We don't actually have much to do here. The task is gone
+	 * and will be reported to us when it resumes. However,
+	 * we need to make sure that it wasn't queued in the
+	 * meantime. */
+
+	/* If job_suspended == 0, then the message raced with
+	 * the job resuming and we simply ignore this event. */
+	if (tsk_rt(t)->job_suspended) {
+
+		/* Cannot be linked anymore! */
+		BUG_ON(lock_linked_on(t) != NULL);
+
+		/* Could have been added to ready queue in the mean time. */
+		if (is_queued(t))
+			remove(&gedf, t);
+	} else {
+		TRACE_TASK(t, "not suspended anymore? Ignored.\n");
+	}
+
+
+out:
+	/* In any case, the CPU that this task was linked to
+	 * needs to get a new assignment. */
+	update_cpu_position_unlocked(sender_id);
+	check_for_preemptions();
+
+	TS_DSP_HANDLER_END;
+	local_irq_restore(flags);
+}
+
+
+#define send_task_new(t) send_to_master(on_task_new, t)
+#define send_task_exit(t) send_to_master(on_exit, t)
+#define send_task_resumed(t) send_to_master(on_resume, t)
+#define send_job_completed(t) send_to_master(on_job_completion, t)
+#define send_job_suspended(t) send_to_master(on_job_suspension, t)
+
+
+
+
+/* **************** plugin callbacks ************** */
+
+
+static noinline void gedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	TRACE("Tasks released! Checking for preemptions.\n");
+
+	__merge_ready(rt, tasks);
+	check_for_preemptions();
+
+	local_irq_restore(flags);
+}
+
+static long gedf_admit_task(struct task_struct* t)
+{
+	return 0;
+}
+
+/* assumes interrupts off */
+static void gedf_task_exit(struct task_struct * t)
+{
+	cpu_entry_t* cpu_state;
+
+	BUG_ON(!is_realtime(t));
+	/* flag remains non-zero even if not linked */
+	BUILD_BUG_ON(JOB_EXIT_OFFSET + NO_CPU <= 0);
+
+	/* The order here is important. We first need to prevent the master
+	 * from linking us anywhere. THEN we lock_linked_on(), at which
+	 * point we are sure that if linked_on == NO_CPU, we will
+	 * not become linked afterwards. */
+	tsk_rt(t)->job_exited = JOB_EXIT_OFFSET + NO_CPU;
+	smp_wmb();
+
+	/* let's see if this task is still in use somewhere */
+	cpu_state = lock_linked_on(t);
+	if (cpu_state) {
+		TRACE_TASK(t, "exit-unlinked from P%d\n", cpu_state->cpu);
+		/* ok, let's unlink this task */
+		tsk_rt(t)->linked_on = NO_CPU;
+		cpu_state->linked = NULL;
+
+		/* We can't update the cpu_position here; need to let master
+		 * do this. */
+		tsk_rt(t)->job_exited = JOB_EXIT_OFFSET + cpu_state->cpu;
+		unlock_cpu_state(cpu_state);
+	} else {
+		TRACE_TASK(t, "not linked on exit\n");
+	}
+
+	cpu_state = lock_scheduled_on(t);
+	if (cpu_state) {
+		TRACE_TASK(t, "still scheduled on P%d\n", cpu_state->cpu);
+		/* is not going to be a real-time task any longer */
+		cpu_state->scheduled = NULL;
+		unlock_cpu_state(cpu_state);
+	} else
+		TRACE_TASK(t, "not scheduled on exit\n");
+
+	/* let master finish the cleanup */
+	send_task_exit(t);
+
+	TRACE_TASK(t, "RIP\n");
+}
+
+/* called with interrupts on, no locks held */
+static void gedf_task_cleanup(struct task_struct * t)
+{
+	BUG_ON(is_realtime(t));
+
+	/* wait for master to process the exit */
+	while (tsk_rt(t)->safe_to_exit != NUM_MAILBOXES) {
+		TRACE_TASK(t, "waiting for a safe exit\n");
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(10);
+	}
+
+	TRACE_TASK(t, "Cleaned!\n");
+}
+
+/* Prepare a task for running in RT mode
+ */
+static void gedf_task_new(struct task_struct * t, int on_rq, int is_scheduled)
+{
+	unsigned long flags;
+	cpu_entry_t* entry;
+	int cpu;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+
+	TRACE_TASK(t, "task new on_rq:%d sched:%d task_cpu:%d\n",
+		   on_rq, is_scheduled, task_cpu(t));
+
+	/* setup job params */
+	release_at(t, litmus_clock());
+
+	tsk_rt(t)->linked_on = NO_CPU;
+	if (is_scheduled && task_cpu(t) != gedf.release_master) {
+		/* patch up CPU state */
+		entry = cpu_state(task_cpu(t));
+
+		raw_spin_lock(&entry->lock);
+		entry->scheduled = t;
+		tsk_rt(t)->scheduled_on = entry->cpu;
+		raw_spin_unlock(&entry->lock);
+
+		/* Tell CPU running this task to stop scheduling unlinked
+		 * real-time task. Master will link it somewhere
+		 * once notified, if appropriate. */
+		preempt(entry);
+	} else {
+		tsk_rt(t)->scheduled_on = NO_CPU;
+	}
+
+	if (is_running(t)) {
+		/* Not suspended.
+		 * Let master know it has something to do */
+		send_task_new(t);
+	} else
+		tsk_rt(t)->job_suspended = 1;
+
+	local_irq_restore(flags);
+}
+
+static void gedf_task_wake_up(struct task_struct *task)
+{
+	unsigned long flags;
+	lt_t now;
+	cpu_entry_t *cpu_state;
+
+	local_irq_save(flags);
+
+	now = litmus_clock();
+
+	TRACE_TASK(task, "wake_up at %llu\n", now);
+
+	cpu_state = lock_linked_on(task);
+	if (cpu_state)
+		/* came back before anyone noticed => nothing to do */
+		raw_spin_unlock(&cpu_state->lock);
+	else
+		/* task became unlinked => has to go through master */
+		send_task_resumed(task);
+
+	local_irq_restore(flags);
+}
+
+static struct task_struct *clear_out_linked(cpu_entry_t *entry, struct task_struct *t)
+{
+	struct task_struct* next = NULL;
+
+	if (entry->linked == t) {
+		tsk_rt(t)->linked_on = NO_CPU;
+		entry->linked = NULL;
+	} else {
+		next = entry->linked;
+		if (next)
+			tsk_rt(next)->scheduled_on = entry->cpu;
+	}
+	entry->scheduled = NULL;
+	tsk_rt(t)->scheduled_on = NO_CPU;
+
+	return next;
+}
+
+/* assumes entry is locked */
+static struct task_struct * gedf_job_completion(cpu_entry_t *entry, struct task_struct *t)
+{
+	struct task_struct* next;
+
+	int forced = budget_enforced(t) && budget_exhausted(t);
+	TRACE_TASK(t, "completes.\n");
+
+	next = clear_out_linked(entry, t);
+	tsk_rt(t)->job_completed = 1;
+
+	unlock_cpu_state(entry);
+
+	send_job_completed(t);
+	sched_trace_task_completion(t, forced);
+
+	return next;
+}
+
+static struct task_struct * gedf_job_suspension(cpu_entry_t *entry, struct task_struct *t)
+{
+	struct task_struct* next;
+
+	TRACE_TASK(t, "suspends.\n");
+
+	next = clear_out_linked(entry, t);
+	tsk_rt(t)->job_suspended = 1;
+
+	unlock_cpu_state(entry);
+
+	send_job_suspended(t);
+
+	return next;
+}
+
+static struct task_struct * gedf_job_preemption(cpu_entry_t *entry, struct task_struct *t)
+{
+	struct task_struct* next;
+
+	TRACE_TASK(t, "preempted.\n");
+
+	next = entry->linked;
+	if (next)
+		tsk_rt(next)->scheduled_on = entry->cpu;
+	if (t)
+		tsk_rt(t)->scheduled_on = NO_CPU;
+
+	entry->scheduled = NULL;
+	unlock_cpu_state(entry);
+
+	return next;
+}
+
+static struct task_struct* gedf_schedule(struct task_struct * prev)
+{
+	cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);
+	int out_of_time, sleep, preempt, exists, blocks;
+	struct task_struct* next = NULL;
+
+	/* Bail out early if we are the release master.
+	 * The release master never schedules any real-time tasks. */
+	if (unlikely(gedf.release_master == entry->cpu)) {
+		sched_state_task_picked();
+		return NULL;
+	}
+
+	raw_spin_lock(&entry->lock);
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE_TASK(prev, "invoked gedf_schedule.\n");
+#endif
+
+	BUG_ON(entry->scheduled && !is_realtime(entry->scheduled));
+	BUG_ON(entry->linked && !is_realtime(entry->linked));
+
+	exists = entry->scheduled != NULL;
+	blocks = exists && !is_running(entry->scheduled);
+	out_of_time = exists && budget_enforced(entry->scheduled)
+		   && budget_exhausted(entry->scheduled);
+	sleep = exists && is_completed(entry->scheduled);
+	preempt = entry->scheduled != entry->linked;
+
+	if (exists) {
+		TRACE_TASK(prev, "blocks:%d out_of_time:%d sleep:%d preempt:%d\n",
+			   blocks, out_of_time, sleep, preempt);
+	}
+
+
+	sched_state_task_picked();
+
+	if (blocks) {
+		next = gedf_job_suspension(entry, entry->scheduled);
+	} else if (sleep || out_of_time) {
+		next = gedf_job_completion(entry, entry->scheduled);
+	} else if (preempt) {
+		next = gedf_job_preemption(entry, entry->scheduled);
+	} else {
+		next = entry->linked;
+		unlock_cpu_state(entry);
+	}
+	/* NOTE: entry is unlocked at this point */
+
+#ifdef WANT_ALL_SCHED_EVENTS
+	if (next)
+		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
+	else if (exists && !next)
+		TRACE("becomes idle at %llu.\n", litmus_clock());
+#endif
+
+	return next;
+}
+
+
+/* _finish_switch - we just finished the switch away from prev
+ */
+static void gedf_finish_switch(struct task_struct *prev)
+{
+	cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);
+
+	entry->scheduled = is_realtime(current) ? current : NULL;
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE_TASK(prev, "switched away from\n");
+#endif
+}
+
+
+static struct domain_proc_info gedf_domain_proc_info;
+static long gedf_get_domain_proc_info(struct domain_proc_info **ret)
+{
+	*ret = &gedf_domain_proc_info;
+	return 0;
+}
+
+static void gedf_setup_domain_proc(void)
+{
+	int i, cpu;
+	int release_master = atomic_read(&release_master_cpu);
+	int num_rt_cpus = num_online_cpus() - (release_master != NO_CPU);
+	struct cd_mapping *map;
+
+	memset(&gedf_domain_proc_info, 0, sizeof(gedf_domain_proc_info));
+	init_domain_proc_info(&gedf_domain_proc_info, num_rt_cpus, 1);
+	gedf_domain_proc_info.num_cpus = num_rt_cpus;
+	gedf_domain_proc_info.num_domains = 1;
+
+	gedf_domain_proc_info.domain_to_cpus[0].id = 0;
+	i = 0;
+	for_each_online_cpu(cpu) {
+		if (cpu == release_master)
+			continue;
+		map = &gedf_domain_proc_info.cpu_to_domains[i];
+		map->id = cpu;
+		cpumask_set_cpu(0, map->mask);
+		++i;
+
+		/* add cpu to the domain */
+		cpumask_set_cpu(cpu,
+				gedf_domain_proc_info.domain_to_cpus[0].mask);
+	}
+}
+
+
+
+static long gedf_activate_plugin(void)
+{
+	int cpu;
+	cpu_entry_t *entry;
+
+#ifdef CONFIG_RELEASE_MASTER
+	gedf.release_master = atomic_read(&release_master_cpu);
+#endif
+
+	/* the dedicated scheduler needs a release master */
+	if (gedf.release_master == NO_CPU) {
+		printk(KERN_ERR "Cannot use dedicated scheduling core "
+		       "if none is configured. Set release master first.\n");
+		return -EINVAL;
+	}
+
+	memset(&master_state, 0, sizeof(master_state));
+
+	for_each_online_cpu(cpu) {
+		entry = &per_cpu(gedf_cpu_entries, cpu);
+		entry->linked = NULL;
+		entry->scheduled = NULL;
+#ifdef CONFIG_RELEASE_MASTER
+		if (cpu != gedf.release_master) {
+#endif
+			TRACE(__FILE__ ": Initializing CPU #%d.\n", cpu);
+			master_state.cpu_online[cpu] = 1;
+			update_cpu_position(entry);
+#ifdef CONFIG_RELEASE_MASTER
+		} else {
+			TRACE(__FILE__ ": CPU %d is release master.\n", cpu);
+		}
+#endif
+	}
+
+	gedf_setup_domain_proc();
+
+	return 0;
+}
+
+/* Plugin object */
+static struct sched_plugin gedf_plugin __cacheline_aligned_in_smp = {
+	.plugin_name = "G-EDF-MP",
+	.finish_switch = gedf_finish_switch,
+	.task_new = gedf_task_new,
+	.complete_job = complete_job,
+	.schedule = gedf_schedule,
+	.task_wake_up = gedf_task_wake_up,
+	.admit_task = gedf_admit_task,
+	.task_exit = gedf_task_exit,
+	.task_cleanup = gedf_task_cleanup,
+	.activate_plugin = gedf_activate_plugin,
+	.get_domain_proc_info = gedf_get_domain_proc_info,
+};
+
+
+static int __init init_gedf_mp(void)
+{
+	int cpu;
+	cpu_entry_t *entry;
+
+	/* initialize CPU state */
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		entry = &per_cpu(gedf_cpu_entries, cpu);
+		gedf_cpus[cpu] = entry;
+		entry->cpu = cpu;
+		raw_spin_lock_init(&entry->lock);
+	}
+	edf_domain_init(&gedf, NULL, gedf_release_jobs);
+	return register_sched_plugin(&gedf_plugin);
+}
+
+
+module_init(init_gedf_mp);
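
In sum, the plugin keeps all global scheduling state private to the release master: client cores only take their local cpu_entry_t lock, record why the local job stopped (job_completed, job_suspended, or job_exited), and forward the event through send_to_master(), while the master alone manipulates the ready queue and the master_state snapshot and pushes link decisions back to the clients via preempt() IPIs.
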
diff --git a/litmus/trace.c b/litmus/trace.c
index 6f2e295..6641e50 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -271,18 +271,31 @@ feather_callback void save_cpu_timestamp_irq(unsigned long event,
 }


-feather_callback void msg_sent(unsigned long event, unsigned long to)
+/* Record to remote trace buffer */
+feather_callback void msg_sent_to(unsigned long event, unsigned long to)
 {
 	save_remote_msg_timestamp(event, to);
 }

+/* Record to local trace buffer */
+feather_callback void msg_sent_local(unsigned long event)
+{
+	save_msg_timestamp(event, 0);
+}
+
 /* Suppresses one IRQ from the irq count. Used by TS_SEND_RESCHED_END, which is
  * called from within an interrupt that is expected. */
-feather_callback void msg_received(unsigned long event)
+feather_callback void msg_received_local(unsigned long event)
 {
 	save_msg_timestamp(event, 1);
 }

+/* Record to remote trace buffer */
+feather_callback void msg_received_from(unsigned long event, unsigned long from)
+{
+	save_remote_msg_timestamp(event, from);
+}
+
 static void __add_timestamp_user(struct timestamp *pre_recorded)
 {
 	unsigned long flags;