diff --git a/Makefile b/Makefile index 8e53f47..c10a5cd 100644 --- a/Makefile +++ b/Makefile @@ -351,7 +351,8 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -fno-delete-null-pointer-checks + -fno-delete-null-pointer-checks \ + -Wno-unused-but-set-variable KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := KBUILD_AFLAGS := -D__ASSEMBLY__ diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h index 48d086d..e1e73b8 100644 --- a/include/litmus/debug_trace.h +++ b/include/litmus/debug_trace.h @@ -13,19 +13,27 @@ void dump_trace_buffer(int max); extern atomic_t __log_seq_no; #ifdef CONFIG_SCHED_DEBUG_TRACE_CALLER -#define TRACE_PREFIX "%d P%d [%s@%s:%d]: " -#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ - raw_smp_processor_id(), \ - __FUNCTION__, __FILE__, __LINE__ +#define TRACE_PREFIX_CALLER "%d P%d [%s@%s:%d]: " +#define TRACE_ARGS_CALLER atomic_add_return(1, &__log_seq_no), \ + raw_smp_processor_id(), \ + __FUNCTION__, __FILE__, __LINE__ #else -#define TRACE_PREFIX "%d P%d: " -#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ +#define TRACE_PREFIX_CALLER "%d P%d: " +#define TRACE_ARGS_CALLER atomic_add_return(1, &__log_seq_no), \ raw_smp_processor_id() #endif -#define TRACE(fmt, args...) \ - sched_trace_log_message(TRACE_PREFIX fmt, \ - TRACE_ARGS, ## args) +#ifdef CONFIG_SCHED_DEBUG_TRACE_TIMING +#define TRACE_PREFIX TRACE_PREFIX_CALLER "[%09lu ns] " +#define TRACE_ARGS TRACE_ARGS_CALLER, ktime_to_ns(ktime_get()) +#else +#define TRACE_PREFIX TRACE_PREFIX_CALLER +#define TRACE_ARGS TRACE_ARGS_CALLER +#endif + +#define TRACE(fmt, args...) \ + sched_trace_log_message(TRACE_PREFIX fmt, \ + TRACE_ARGS, ##args) #define TRACE_TASK(t, fmt, args...) \ TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h index caf2a1e..05f5c0b 100644 --- a/include/litmus/fdso.h +++ b/include/litmus/fdso.h @@ -19,8 +19,9 @@ typedef enum { FMLP_SEM = 0, SRP_SEM = 1, + BWI_SEM = 2, - MAX_OBJ_TYPE = 1 + MAX_OBJ_TYPE = BWI_SEM } obj_type_t; struct inode_obj_id { @@ -65,6 +66,7 @@ static inline void* od_lookup(int od, obj_type_t type) #define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) +#define lookup_bwi_sem(od) ((struct bwi_semaphore*) od_lookup(od, BWI_SEM)) #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h index 9bd361e..1fb1625 100644 --- a/include/litmus/jobs.h +++ b/include/litmus/jobs.h @@ -2,6 +2,7 @@ #define __LITMUS_JOBS_H__ void prepare_for_next_period(struct task_struct *t); +void prepare_for_postponement(struct task_struct *t); void release_at(struct task_struct *t, lt_t start); long complete_job(void); diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 94086e2..0a9cfed 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h @@ -59,6 +59,19 @@ void litmus_exit_task(struct task_struct *tsk); #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) #define get_boost_start(t) (tsk_rt(t)->boost_start_time) +#define is_proxied(t) (tsk_rt(t)->proxied_by != NULL) +#define is_proxying(t) (tsk_rt(t)->proxying_for != NULL) +#define get_proxied(t) (is_proxied(t) ? 
tsk_rt(t)->proxied_by : t) +#define __get_proxying(t) (tsk_rt(t)->proxying_for) + +static inline struct task_struct* get_proxying(struct task_struct* t) +{ + while (is_proxying(t)) + t = __get_proxying(t); + + return t; +} + inline static int budget_exhausted(struct task_struct* t) { return get_exec_time(t) >= get_exec_cost(t); @@ -78,6 +91,9 @@ inline static lt_t budget_remaining(struct task_struct* t) #define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \ == PRECISE_ENFORCEMENT) +#define budget_action_complete(t) (tsk_rt(t)->task_params.budget_action \ + == COMPLETE_JOB) + #define is_hrt(t) \ (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) #define is_srt(t) \ @@ -110,6 +126,15 @@ static inline lt_t litmus_clock(void) #define is_tardy(t, now) \ (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) +static inline int is_proxy_running(struct task_struct* t) +{ + struct task_struct* p = get_proxying(t); + + BUG_ON(p == t); + + return p != t && is_running(p); +} + /* real-time comparison macros */ #define earlier_deadline(a, b) (lt_before(\ (a)->rt_param.job_params.deadline,\ diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 5de422c..416a683 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -33,6 +33,11 @@ typedef enum { PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */ } budget_policy_t; +typedef enum { + COMPLETE_JOB, /* on overrun, mark the completion of the job */ + POSTPONE_DEADLINE /* on overrun, postpone the deadline */ +} budget_action_t; + struct rt_task { lt_t exec_cost; lt_t period; @@ -40,6 +45,7 @@ struct rt_task { unsigned int cpu; task_class_t cls; budget_policy_t budget_policy; /* ignored by pfair */ + budget_action_t budget_action; /* used only by C-EDF (for now) */ }; /* The definition of the data that is shared between the kernel and real-time @@ -113,6 +119,8 @@ struct rt_param { unsigned int priority_boosted:1; /* If so, when did this start? */ lt_t boost_start_time; + + wait_queue_t bwi_wait; #endif /* user controlled parameters */ @@ -129,6 +137,16 @@ struct rt_param { */ struct task_struct* inh_task; + /* ... + * ... + */ + struct task_struct* proxying_for; + struct task_struct* __proxying_for; + struct task_struct* proxied_by; + + struct list_head proxies; + struct list_head proxies_entry; + #ifdef CONFIG_NP_SECTION /* For the FMLP under PSN-EDF, it is required to make the task * non-preemptive from kernel space. In order not to interfere with diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index 6e7cabd..c3294f5 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h @@ -14,6 +14,7 @@ /************************ setup/tear down ********************/ typedef long (*activate_plugin_t) (void); +typedef long (*pre_deactivate_plugin_t) (void); typedef long (*deactivate_plugin_t) (void); @@ -74,6 +75,7 @@ struct sched_plugin { /* setup */ activate_plugin_t activate_plugin; + pre_deactivate_plugin_t pre_deactivate_plugin; deactivate_plugin_t deactivate_plugin; /* scheduler invocation */ diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 7ca34cb..01edb92 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h @@ -59,6 +59,11 @@ struct st_completion_data { /* A job completed. */ u8 __unused[7]; }; +struct st_postponement_data { /* A job has its deadline postponed because of overrun. */ + u64 when; + u64 __unused; +}; + struct st_block_data { /* A task blocks. 
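 * (The new st_postponement_data record above shares this 16-byte payload
 *  layout -- a 64-bit "when" timestamp plus padding -- so a trace reader
 *  only needs to handle the new ST_POSTPONEMENT type id; a hypothetical
 *  userspace parser could add, assuming the usual st_event_record layout:
 *
 *      case ST_POSTPONEMENT:
 *          printf("job postponed at %llu\n",
 *                 (unsigned long long) rec->data.postponement.when);
 *          break;
 *  )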
*/ u64 when; u64 __unused; @@ -91,6 +96,7 @@ typedef enum { ST_SWITCH_TO, ST_SWITCH_AWAY, ST_COMPLETION, + ST_POSTPONEMENT, ST_BLOCK, ST_RESUME, ST_ACTION, @@ -109,6 +115,7 @@ struct st_event_record { DATA(switch_to); DATA(switch_away); DATA(completion); + DATA(postponement); DATA(block); DATA(resume); DATA(action); @@ -145,6 +152,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id, feather_callback void do_sched_trace_task_completion(unsigned long id, struct task_struct* task, unsigned long forced); +feather_callback void do_sched_trace_task_postponement(unsigned long id, + struct task_struct* task); feather_callback void do_sched_trace_task_block(unsigned long id, struct task_struct* task); feather_callback void do_sched_trace_task_resume(unsigned long id, @@ -181,16 +190,19 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, #define sched_trace_task_completion(t, forced) \ SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ (unsigned long) forced) +/* FIXME: does this breaks any ABI? */ +#define sched_trace_task_postponement(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_postponement, t) #define sched_trace_task_block(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t) + SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_block, t) #define sched_trace_task_resume(t) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) + SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_task_resume, t) #define sched_trace_action(t, action) \ - SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \ + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 10, do_sched_trace_action, t, \ (unsigned long) action); /* when is a pointer, it does not need an explicit cast to unsigned long */ #define sched_trace_sys_release(when) \ - SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) + SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_sys_release, when) #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ diff --git a/kernel/sched.c b/kernel/sched.c index c5d7750..ab34e97 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2372,8 +2372,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, unsigned long en_flags = ENQUEUE_WAKEUP; struct rq *rq; - if (is_realtime(p)) + if (is_realtime(p)) { + if (!p->state) + printk("waking a running litmus task: state=%d\n se.on_rq=%d", p->pid, p->se.on_rq); TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state); + } this_cpu = get_cpu(); diff --git a/litmus/Kconfig b/litmus/Kconfig index ad8dc83..63b18ec 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig @@ -180,6 +180,20 @@ config SCHED_DEBUG_TRACE_CALLER If unsure, say No. +config SCHED_DEBUG_TRACE_TIMING + bool "Include timing information ([... ns]) tag in TRACE() log" + depends on SCHED_DEBUG_TRACE + default n + help + With this option enabled, TRACE() prepends + + "[ ns]" + + to each message in the debug log. Adds even more clutter and also some + overhead, but could help understanting the timings while at debugging. + + If unsure, say No. + endmenu endmenu diff --git a/litmus/budget.c b/litmus/budget.c index 310e9a3..6e60799 100644 --- a/litmus/budget.c +++ b/litmus/budget.c @@ -62,13 +62,14 @@ static void arm_enforcement_timer(struct enforcement_timer* et, /* Calling this when there is no budget left for the task * makes no sense, unless the task is non-preemptive. 
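 * For a preemptable task the timer is simply (re)armed at
 * litmus_clock() + budget_remaining(t); e.g. (illustrative numbers) with
 * exec_cost = 10 ms of which 7 ms have already been consumed in this job,
 * budget_remaining() is 3 ms and the timer fires 3,000,000 ns from now,
 * which is also what the TRACE_TASK() added below reports.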
*/ - BUG_ON(budget_exhausted(t) && (!is_np(t))); + WARN_ON(budget_exhausted(t) && (!is_np(t))); /* __hrtimer_start_range_ns() cancels the timer * anyway, so we don't have to check whether it is still armed */ if (likely(!is_np(t))) { when_to_fire = litmus_clock() + budget_remaining(t); + TRACE_TASK(t, "remaining budget %llu --> firing at %llu\n", (unsigned long long)budget_remaining(t), (unsigned long long)when_to_fire); __hrtimer_start_range_ns(&et->timer, ns_to_ktime(when_to_fire), 0 /* delta */, @@ -84,6 +85,9 @@ void update_enforcement_timer(struct task_struct* t) { struct enforcement_timer* et = &__get_cpu_var(budget_timer); + if (t && is_proxied(t)) + t = get_proxied(t); + if (t && budget_precisely_enforced(t)) { /* Make sure we call into the scheduler when this budget * expires. */ diff --git a/litmus/fdso.c b/litmus/fdso.c index aa7b384..f7effc6 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c @@ -23,6 +23,7 @@ extern struct fdso_ops generic_lock_ops; static const struct fdso_ops* fdso_ops[] = { &generic_lock_ops, /* FMLP_SEM */ &generic_lock_ops, /* SRP_SEM */ + &generic_lock_ops, /* BWI_SEM */ }; static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) diff --git a/litmus/jobs.c b/litmus/jobs.c index 36e3146..83c8c22 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c @@ -20,6 +20,35 @@ void prepare_for_next_period(struct task_struct *t) t->rt.time_slice = 1; } +void prepare_for_postponement(struct task_struct *t) +{ + BUG_ON(!t); + + /* we assume we are in overrun situation... If not, why + * do we whant to postpone the deadline? + */ + WARN_ON(!budget_exhausted(t)); + + /* move the deadline ahead by one period for exactly the number + * of times we overrun the budget. Also, notice that we may charge + * the execution of this "fake instance" of the job with the + * overrun the task committed during the previous (fake or + * regulare) instance. + */ + do { + t->rt_param.job_params.deadline += get_rt_period(t); + t->rt_param.job_params.exec_time -= get_exec_cost(t); + } while (t->rt_param.job_params.exec_time > get_exec_cost(t)); + + /* do not touch t->rt_params.job_params.release or + * t->rt_param.job_params.job_no as we want to stay + * within the same job! + */ + + /* don't confuse Linux */ + t->rt.time_slice = 1; +} + void release_at(struct task_struct *t, lt_t start) { t->rt_param.job_params.deadline = start; diff --git a/litmus/litmus.c b/litmus/litmus.c index 11ccaaf..be154cc 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -120,6 +120,16 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) pid, tp.budget_policy); goto out_unlock; } + if (tp.budget_policy != NO_ENFORCEMENT && + (tp.budget_action != COMPLETE_JOB && + tp.budget_action != POSTPONE_DEADLINE)) + { + printk(KERN_INFO "litmus: real-time task %d rejected " + "because enforcement by means of an unsupported " + "action specified (%d)\n", + pid, tp.budget_action); + goto out_unlock; + } target->rt_param.task_params = tp; @@ -292,6 +302,10 @@ static void reinit_litmus_state(struct task_struct* p, int restore) */ WARN_ON(p->rt_param.inh_task); + /* And the same applies for proxy-execution related bits. + */ + WARN_ON(p->rt_param.proxying_for /*|| p->rt_param.proxied_by*/); + /* Cleanup everything else. 
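 * (The memset() below also wipes the new proxying_for / proxied_by
 *  pointers and the proxies list heads; a zeroed struct list_head is not
 *  a valid empty list, and C-EDF re-runs INIT_LIST_HEAD() on both of
 *  them in cedf_task_new() before they are used.)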
*/ memset(&p->rt_param, 0, sizeof(p->rt_param)); @@ -396,6 +410,8 @@ int switch_sched_plugin(struct sched_plugin* plugin) BUG_ON(!plugin); + litmus->pre_deactivate_plugin(); + /* forbid other cpus to use the plugin */ atomic_set(&cannot_use_plugin, 1); /* send IPI to force other CPUs to synch with us */ @@ -457,6 +473,8 @@ void litmus_exec(void) if (is_realtime(p)) { WARN_ON(p->rt_param.inh_task); + WARN_ON(p->rt_param.proxying_for || + p->rt_param.proxied_by); if (tsk_rt(p)->ctrl_page) { free_page((unsigned long) tsk_rt(p)->ctrl_page); tsk_rt(p)->ctrl_page = NULL; diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 73fe1c4..fde456b 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,14 @@ static enum cache_level cluster_config = GLOBAL_CLUSTER; struct clusterdomain; +typedef struct { + int cpu; + struct task_struct* spin_on; + + struct work_struct work; +} pe_data_t; + + /* cpu_entry_t - maintain the linked and scheduled state * * A cpu also contains a pointer to the cedf_domain_t cluster @@ -67,6 +76,12 @@ typedef struct { struct task_struct* scheduled; /* only RT tasks */ atomic_t will_schedule; /* prevent unneeded IPIs */ struct bheap_node* hn; + + /* ... + * ... + */ + struct task_struct* pe_stub; + pe_data_t pe_data; } cpu_entry_t; /* one cpu_entry_t per CPU */ @@ -95,7 +110,7 @@ typedef struct clusterdomain { struct bheap_node *heap_node; struct bheap cpu_heap; /* lock for this cluster */ -#define lock domain.ready_lock +#define cedf_lock domain.ready_lock } cedf_domain_t; /* a cedf_domain per cluster; allocation is done at init/activation time */ @@ -292,12 +307,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); unsigned long flags; - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cedf_lock, flags); __merge_ready(&cluster->domain, tasks); check_for_preemptions(cluster); - raw_spin_unlock_irqrestore(&cluster->lock, flags); + raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); } /* caller holds cedf_lock */ @@ -323,6 +338,55 @@ static noinline void job_completion(struct task_struct *t, int forced) cedf_job_arrival(t); } +/* as above, called with cedf_lock held */ +static noinline void job_postponement(struct task_struct *t) +{ + BUG_ON(!t); + + sched_trace_task_postponement(t); + + TRACE_TASK(t, "job_postponement().\n"); + + /* push the deadline of this instance ahead */ + prepare_for_postponement(t); + /* unlink and requeue mimicing a new job arrival */ + unlink(t); + if (is_running(t)) + cedf_job_arrival(t); +} + +/* check wether to release a new job or keep running the current one */ +static noinline int job_overflow(struct task_struct *task, lt_t now) +{ + lt_t right, left; + + /* Deadline postponing task can use their remaining budget with + * their current deadline iff this does not cause them to exceed the + * bandwidth they have been assigned. This is checked by + * verifying that: + * remaining_budget / (deadline - now) < exec_cost / rt_period + */ + if (budget_enforced(task) && !budget_action_complete(task)) { + + /* left and right are the two sides of the equation above, + * after a bit of shuffling to use multiplications instead + * of divisions. 
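 * Worked example (illustrative numbers): exec_cost = 4 ms, period = 10 ms,
 * and the task wakes up 5 ms before its current deadline with 1 ms of
 * budget left. Then left = 10 * 1 = 10 and right = 5 * 4 = 20, so
 * lt_before(right, left) is false: the leftover budget fits within the
 * assigned bandwidth and the job keeps its current deadline rather than
 * getting a fresh release.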
+ */ + left = get_rt_period(task) * budget_remaining(task); + right = (get_deadline(task) - now) * get_exec_cost(task); + + return lt_before(right, left); + } else + /* Different kind of tasks always consider a weke up as a + * new job release, apart from when they resume from + * waiting for a semaphore. + */ + if (is_tardy(task, now) && get_rt_flags(task) != RT_F_EXIT_SEM) + return 0; + + return 1; +} + /* cedf_tick - this function is called for every local timer * interrupt. * @@ -375,26 +439,37 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) { cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); cedf_domain_t *cluster = entry->cluster; - int out_of_time, sleep, preempt, np, exists, blocks; - struct task_struct* next = NULL; + int out_of_time, oot_scheduled, oot_complete, + sleep, preempt, np, exists, proxied, blocks; + struct task_struct *scheduled = NULL, *next = NULL; - raw_spin_lock(&cluster->lock); + raw_spin_lock(&cluster->cedf_lock); clear_will_schedule(); /* sanity checking */ BUG_ON(entry->scheduled && entry->scheduled != prev); BUG_ON(entry->scheduled && !is_realtime(prev)); - BUG_ON(is_realtime(prev) && !entry->scheduled); + //BUG_ON(is_realtime(prev) && !entry->scheduled); + //BUG_ON(scheduled != entry->scheduled && scheduled == entry->linked); + + /* ... + * ... + */ /* (0) Determine state */ - exists = entry->scheduled != NULL; - blocks = exists && !is_running(entry->scheduled); - out_of_time = exists && - budget_enforced(entry->scheduled) && - budget_exhausted(entry->scheduled); - np = exists && is_np(entry->scheduled); - sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; - preempt = entry->scheduled != entry->linked; + exists = entry->scheduled != NULL; + proxied = exists && is_proxied(entry->scheduled); + scheduled = exists ? get_proxied(entry->scheduled) : NULL; + blocks = exists && !is_running(entry->scheduled); + out_of_time = exists && + budget_enforced(entry->scheduled) && + budget_exhausted(entry->scheduled); + oot_scheduled = proxied ? budget_enforced(scheduled) && + budget_exhausted(scheduled) : out_of_time; + oot_complete = oot_scheduled && budget_action_complete(scheduled); + np = exists && is_np(entry->scheduled); + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; + preempt = scheduled != entry->linked; #ifdef WANT_ALL_SCHED_EVENTS TRACE_TASK(prev, "invoked cedf_schedule.\n"); @@ -406,6 +481,9 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) "state:%d sig:%d\n", blocks, out_of_time, np, sleep, preempt, prev->state, signal_pending(prev)); + if (proxied) + TRACE_TASK(prev, "is being proxied by (%s/%d) oot_scheduled:%d oot_complete:%d\n", + scheduled->comm, scheduled->pid, oot_scheduled, oot_complete); if (entry->linked && preempt) TRACE_TASK(prev, "will be preempted by %s/%d\n", entry->linked->comm, entry->linked->pid); @@ -413,15 +491,26 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) /* If a task blocks we have no choice but to reschedule. */ - if (blocks) - unlink(entry->scheduled); + if (blocks) { + + /* ... + * ... + */ + if (!proxied) + unlink(entry->scheduled); + else { + unlink(scheduled); + scheduled->state = entry->scheduled->state; + TRACE_TASK(scheduled, "blocking since it is proxying %s/%d which is blocked (state:%d)\n", entry->scheduled->comm, entry->scheduled->pid, entry->scheduled->state); + } + } /* Request a sys_exit_np() call if we would like to preempt but cannot. 
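 * (Note the trigger is now oot_scheduled rather than out_of_time: when
 *  prev runs on a lent budget, the budget actually being charged is that
 *  of get_proxied(prev) -- see update_time_litmus() -- and that is what
 *  oot_scheduled checks above.)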
* We need to make sure to update the link structure anyway in case * that we are still linked. Multiple calls to request_exit_np() don't * hurt. */ - if (np && (out_of_time || preempt || sleep)) { + if (np && (oot_scheduled || preempt || sleep)) { unlink(entry->scheduled); request_exit_np(entry->scheduled); } @@ -431,38 +520,133 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) * this. Don't do a job completion if we block (can't have timers running * for blocked jobs). Preemption go first for the same reason. */ - if (!np && (out_of_time || sleep) && !blocks && !preempt) - job_completion(entry->scheduled, !sleep); + if (!np && (oot_scheduled || sleep) && !blocks /*&& (!oot_complete || !preempt)*/) { + /* This shouldn't happen, unless the timekeeping is seriously + * broken and there are differences between ktime and + * rq->clock. If this happens (e.g., under KVM, or emulation + * in general), maybe it's worthwhile to dig more and try + * to fix things (i.e., reconciling the two time tracking + * mechanisms). + */ + WARN_ON_ONCE(preempt); + + if (sleep || oot_complete) + job_completion(scheduled, !sleep); + else + job_postponement(scheduled); + } /* Link pending task if we became unlinked. */ if (!entry->linked) +link_retry: link_task_to_cpu(__take_ready(&cluster->domain), entry); + if (entry->linked && is_proxying(entry->linked) && + !is_proxy_running(entry->linked)) { + TRACE_TASK(entry->linked, "picked up but unlinked too, as it is proxying a blocked task (state:%d)\n", get_proxying(entry->linked)->state); + + entry->linked->state = get_proxying(entry->linked)->state; + unlink(entry->linked); + + goto link_retry; + } + /* The final scheduling decision. Do we need to switch for some reason? * If linked is different from scheduled, then select linked as next. */ if ((!np || blocks) && - entry->linked != entry->scheduled) { + (entry->linked != scheduled || entry->linked != entry->scheduled)) { + if (scheduled) { + /* not gonna be scheduled soon */ + scheduled->rt_param.scheduled_on = NO_CPU; + TRACE_TASK(scheduled, "scheduled_on = NO_CPU\n"); + } + if (scheduled && scheduled != entry->scheduled /*&& scheduled != entry->linked*/) { + /* the proxied task neither is scheduled anymore */ + entry->scheduled->rt_param.scheduled_on = NO_CPU; + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); + + /**/ + if (entry->scheduled == entry->pe_stub && + tsk_rt(scheduled)->proxying_for == entry->pe_stub) + tsk_rt(scheduled)->proxying_for = tsk_rt(scheduled)->__proxying_for; + } /* Schedule a linked job? */ if (entry->linked) { entry->linked->rt_param.scheduled_on = entry->cpu; next = entry->linked; - } - if (entry->scheduled) { - /* not gonna be scheduled soon */ - entry->scheduled->rt_param.scheduled_on = NO_CPU; - TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); + TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id()); + /* ... + * ... + */ + if (is_proxied(next)) { + TRACE_TASK(next, "clear %s/%d as its proxied_by\n", + get_proxied(next)->comm, + get_proxied(next)->pid); + tsk_rt(next)->proxied_by = NULL; + } } } else /* Only override Linux scheduler if we have a real-time task * scheduled that needs to continue. */ if (exists) - next = prev; + next = scheduled; + + /* Under normal circumstances we wouldn't switch, check if we need + * to because of prev just became a proxy for someone else. + */ + + /* FIXME: could these things actually happen? 
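 * If they could, next (or the task it resolves to) would not be a
 * real-time task, and the proxy hand-off below -- which walks
 * get_proxying(next) to the end of the blocking chain and, when that
 * task is already running on another CPU, parks next behind the per-CPU
 * pe_stub spinner instead -- would be operating on a task without valid
 * rt_param state.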
*/ + WARN_ON(scheduled && scheduled == next && !is_realtime(scheduled)); + WARN_ON(next && next != scheduled && !is_realtime(next)); + + if (next && is_proxying(next)) { + struct task_struct *target = get_proxying(next); + struct rt_param *tpi; + int target_cpu; + + /* FIXME: here we assume target is both schedulable and + * not currently scheduled, which might be not true!! + */ + TRACE_TASK(next, "will act as a proxy for %s/%d\n", + target->comm, target->pid); + + //ti = list_first_entry(&tsk_rt(target)->proxies, struct rt_param, proxies_entry) + TRACE_TASK(target, "list of current proxies:\n"); + list_for_each_entry(tpi, &tsk_rt(target)->proxies, proxies_entry) { + struct task_struct *ti = container_of(tpi, struct task_struct, rt_param); + + TRACE_TASK(target, " <-- %s/%d\n", ti->comm, ti->pid); + } + + /* ... + * ... + */ + target_cpu = tsk_rt(target)->scheduled_on; + if (target_cpu != NO_CPU && target_cpu != smp_processor_id()) { + //struct task_struct *target_proxy = get_proxied(target); + TRACE_TASK(target, "already running on P%d, we need to wait\n", target_cpu); + + WARN_ON(target == entry->pe_stub); + + tsk_rt(next)->__proxying_for = tsk_rt(next)->proxying_for; + tsk_rt(next)->proxying_for = entry->pe_stub; + tsk_rt(entry->pe_stub)->proxied_by = next; + entry->pe_data.spin_on = target; + target = entry->pe_stub; + } + + tsk_rt(target)->proxied_by = next; + next = target; + + next->rt_param.scheduled_on = entry->cpu; + TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id()); + } sched_state_task_picked(); - raw_spin_unlock(&cluster->lock); + raw_spin_unlock(&cluster->cedf_lock); #ifdef WANT_ALL_SCHED_EVENTS TRACE("cedf_lock released, next=0x%p\n", next); @@ -499,7 +683,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) cpu_entry_t* entry; cedf_domain_t* cluster; - TRACE("gsn edf: task new %d\n", t->pid); + TRACE("c-edf: task new %d\n", t->pid); /* the cluster doesn't change even if t is running */ cluster = task_cpu_cluster(t); @@ -509,6 +693,9 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) /* setup job params */ release_at(t, litmus_clock()); + INIT_LIST_HEAD(&t->rt_param.proxies); + INIT_LIST_HEAD(&t->rt_param.proxies_entry); + if (running) { entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); BUG_ON(entry->scheduled); @@ -524,55 +711,107 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); } +static inline +void __cedf_task_wake_up(struct task_struct *task, lt_t now) +{ + /* Deadline postponing tasks consider all resumes the same way, + * independently from them being caused by a semaphore, by actual + * suspensions, etc. In this case, wether or not to continue using + * the current scheduling parameters for this job is decided depending + * on how much bandwidth is left for the task in this instance. + * + * On the other hand, for all the other configurations (not enforced, + * or enforced by forced job completion) we need to take suspensions + * because of semaphores into account! If a job resumes after being + * suspended due to acquiring a semaphore, it should never be treated + * as a new job release. + * + * This is all handled inside job_overflow(). + */ + if (job_overflow(task, now)) + goto release; + + if (task->rt.time_slice) /* FIXME: What's this for? 
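 * rt.time_slice is the field prepare_for_next_period() and
 * prepare_for_postponement() set to 1 ("don't confuse Linux"); the check
 * is carried over unchanged from the original cedf_task_wake_up() path
 * for jobs that resume before their deadline.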
*/ + goto no_release; + else + goto arrival; + +release: + /* new sporadic release */ + release_at(task, now); + sched_trace_task_release(task); +no_release: + /* came back in time before deadline */ + set_rt_flags(task, RT_F_RUNNING); +arrival: + cedf_job_arrival(task); +} + static void cedf_task_wake_up(struct task_struct *task) { unsigned long flags; lt_t now; cedf_domain_t *cluster; + struct rt_param *tpi; TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); cluster = task_cpu_cluster(task); - raw_spin_lock_irqsave(&cluster->lock, flags); - /* We need to take suspensions because of semaphores into - * account! If a job resumes after being suspended due to acquiring - * a semaphore, it should never be treated as a new job release. + raw_spin_lock_irqsave(&cluster->cedf_lock, flags); + now = litmus_clock(); + + /* ... + * ... */ - if (get_rt_flags(task) == RT_F_EXIT_SEM) { - set_rt_flags(task, RT_F_RUNNING); - } else { - now = litmus_clock(); - if (is_tardy(task, now)) { - /* new sporadic release */ - release_at(task, now); - sched_trace_task_release(task); - } - else { - if (task->rt.time_slice) { - /* came back in time before deadline - */ - set_rt_flags(task, RT_F_RUNNING); - } + if (tsk_rt(task)->linked_on == NO_CPU && !is_queued(task)) + __cedf_task_wake_up(task, now); + + /* ... + * ... + */ + list_for_each_entry(tpi, &tsk_rt(task)->proxies, proxies_entry) { + struct task_struct *ti = container_of(tpi, struct task_struct, rt_param); + + if (tsk_rt(ti)->linked_on == NO_CPU && !is_running(ti) && !is_queued(ti)) { + __cedf_task_wake_up(ti, now); + ti->state = TASK_RUNNING; + TRACE_TASK(ti, "woken as it is a proxy for %s/%d, state:%d\n", + task->comm, task->pid, ti->state); } } - cedf_job_arrival(task); - raw_spin_unlock_irqrestore(&cluster->lock, flags); + + raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); } static void cedf_task_block(struct task_struct *t) { unsigned long flags; cedf_domain_t *cluster; + //struct rt_param *tpi; TRACE_TASK(t, "block at %llu\n", litmus_clock()); cluster = task_cpu_cluster(t); /* unlink if necessary */ - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cedf_lock, flags); + if (is_proxied(t)) { + t = get_proxied(t); + TRACE_TASK(t, "will actually block, as the current proxy\n"); + } unlink(t); - raw_spin_unlock_irqrestore(&cluster->lock, flags); + + /*list_for_each_entry(tpi, &tsk_rt(t)->proxies, proxies_entry) { + struct task_struct *ti = container_of(tpi, struct task_struct, rt_param); + + ti->state = t->state; + unlink(ti); + TRACE_TASK(ti, "unlinked as it is a proxy for %s/%d\n", + t->comm, t->pid); + }*/ + + raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); BUG_ON(!is_realtime(t)); } @@ -584,7 +823,7 @@ static void cedf_task_exit(struct task_struct * t) cedf_domain_t *cluster = task_cpu_cluster(t); /* unlink if necessary */ - raw_spin_lock_irqsave(&cluster->lock, flags); + raw_spin_lock_irqsave(&cluster->cedf_lock, flags); unlink(t); if (tsk_rt(t)->scheduled_on != NO_CPU) { cpu_entry_t *cpu; @@ -592,7 +831,10 @@ static void cedf_task_exit(struct task_struct * t) cpu->scheduled = NULL; tsk_rt(t)->scheduled_on = NO_CPU; } - raw_spin_unlock_irqrestore(&cluster->lock, flags); + + WARN_ON(!list_empty(&tsk_rt(t)->proxies) || !list_empty(&tsk_rt(t)->proxies_entry)); + + raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); BUG_ON(!is_realtime(t)); TRACE_TASK(t, "RIP\n"); @@ -638,6 +880,374 @@ static void cleanup_cedf(void) } } +#ifdef CONFIG_LITMUS_LOCKING + +#include + + +static void 
set_proxy_execution(struct task_struct* t, struct task_struct* proxy) +{ +} + +/* called with IRQs off */ +static void clear_proxy_execution(struct task_struct* t) +{ +} + +/* ******************** BWI support ********************** */ + +/* struct for semaphore with priority inheritance */ +struct bwi_semaphore { + struct litmus_lock litmus_lock; + /* current resource holder */ + struct task_struct *owner; + + /* FIFO queue of waiting tasks */ + wait_queue_head_t wait; +}; + +static inline struct bwi_semaphore* bwi_from_lock(struct litmus_lock* lock) +{ + return container_of(lock, struct bwi_semaphore, litmus_lock); +} + +int cedf_bwi_lock(struct litmus_lock* lock) +{ + struct task_struct* t = current; + struct bwi_semaphore *sem = bwi_from_lock(lock); + unsigned long flags; + + if (!is_realtime(current)) + return -EPERM; + + spin_lock_irqsave(&sem->wait.lock, flags); + + if (sem->owner) { + struct rt_param *tpi, *tpi_h; + struct task_struct *target; + + /* lock is not free */ + init_waitqueue_entry(&tsk_rt(current)->bwi_wait, current); + __add_wait_queue_tail_exclusive(&sem->wait, &tsk_rt(t)->bwi_wait); + + TRACE_CUR("lock is busy, we start proxying for %s/%d (or its chain)\n", + sem->owner->comm, sem->owner->pid); + + raw_spin_lock(&task_cpu_cluster(t)->cedf_lock); + + /* ... + * ... + */ + tsk_rt(t)->proxying_for = sem->owner; + target = get_proxying(sem->owner); + list_add(&tsk_rt(t)->proxies_entry, &tsk_rt(target)->proxies); + + list_for_each_entry_safe(tpi, tpi_h, &tsk_rt(t)->proxies, proxies_entry) { + //struct task_struct *ti = container_of(tpi, struct task_struct, rt_param); + list_move(&tpi->proxies_entry, &tsk_rt(target)->proxies); + } + + raw_spin_unlock(&task_cpu_cluster(t)->cedf_lock); + spin_unlock_irqrestore(&sem->wait.lock, flags); + + schedule(); + + /* ... + * ... + */ + //TRACE_CUR("lock owner is %s/%d\n", sem->owner->comm, sem->owner->pid); + BUG_ON(sem->owner != t); + } else { + TRACE_CUR("takes the lock\n"); + + sem->owner = t; + + spin_unlock_irqrestore(&sem->wait.lock, flags); + } + + return 0; +} + +int cedf_bwi_unlock(struct litmus_lock* lock) +{ + struct task_struct *t = current, *next; + struct bwi_semaphore *sem = bwi_from_lock(lock); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&sem->wait.lock, flags); + + if (sem->owner != t) { + ret = -EINVAL; + goto out_unlock; + } + next = waitqueue_first(&sem->wait); + if (next) { + wait_queue_t *wqi; + struct rt_param *tpi, *tpi_h; + + /* next becomes the resource holder */ + sem->owner = next; + __remove_wait_queue(&sem->wait, &tsk_rt(next)->bwi_wait); + TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); + + /* let's check if ... + */ + raw_spin_lock(&task_cpu_cluster(t)->cedf_lock); + //WARN_ON(task_cpu_cluster(t) != task_cpu_cluster(next)); + + tsk_rt(next)->proxying_for = NULL; + list_del_init(&tsk_rt(next)->proxies_entry); + + /* ... + * ... + */ + list_for_each_entry(wqi, &sem->wait.task_list, task_list) { + struct rt_param *tsk_param = container_of(wqi, struct rt_param, bwi_wait); + struct task_struct *wqt = container_of(tsk_param, struct task_struct, rt_param); + + tsk_param->proxying_for = next; + //list_move(&tsk_param->proxies_entry, &tsk_rt(sem->owner)->proxies); + TRACE_TASK(next, "set as the new proxying_for in %s/%d!\n", + wqt->comm, wqt->pid); + } + + /* ... + * ... 
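 * (The loop below hands the proxies over: every task that was proxying
 *  the releasing owner t and whose blocking chain now ends at the new
 *  owner next is moved from t's proxies list onto next's, so that a
 *  later wake-up of next can re-activate those waiters from
 *  cedf_task_wake_up().)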
+ */ + list_for_each_entry_safe(tpi, tpi_h, &tsk_rt(t)->proxies, proxies_entry) { + struct task_struct *ti = container_of(tpi, struct task_struct, rt_param); + + if (get_proxying(ti) == next) + list_move(&tpi->proxies_entry, &tsk_rt(next)->proxies); + } + + if (is_proxied(t)) { + TRACE_CUR("was being proxied, we need to reschedule now!\n"); + + raw_spin_unlock(&task_cpu_cluster(t)->cedf_lock); + spin_unlock_irqrestore(&sem->wait.lock, flags); + + schedule(); + + goto out; + } + + raw_spin_unlock(&task_cpu_cluster(t)->cedf_lock); + } else + /* lock becomes available */ + sem->owner = NULL; + +out_unlock: + spin_unlock_irqrestore(&sem->wait.lock, flags); +out: + return ret; +} + +int cedf_bwi_close(struct litmus_lock* lock) +{ + struct task_struct *t = current; + struct bwi_semaphore *sem = bwi_from_lock(lock); + unsigned long flags; + int owner; + + spin_lock_irqsave(&sem->wait.lock, flags); + owner = sem->owner == t; + spin_unlock_irqrestore(&sem->wait.lock, flags); + + if (owner) + cedf_bwi_unlock(lock); + + return 0; +} + +void cedf_bwi_free(struct litmus_lock* lock) +{ + TRACE("bwi_lock 0x%p being freed\n", lock); + + kfree(bwi_from_lock(lock)); +} + +static struct litmus_lock_ops cedf_bwi_lock_ops = { + .close = cedf_bwi_close, + .lock = cedf_bwi_lock, + .unlock = cedf_bwi_unlock, + .deallocate = cedf_bwi_free, +}; + +static struct litmus_lock* cedf_new_bwi(void) +{ + struct bwi_semaphore* sem; + + sem = kmalloc(sizeof(*sem), GFP_KERNEL); + if (!sem) + return NULL; + + sem->owner = NULL; + init_waitqueue_head(&sem->wait); + sem->litmus_lock.ops = &cedf_bwi_lock_ops; + + TRACE("new bwi_lock 0x%p created\n", &sem->litmus_lock); + + return &sem->litmus_lock; +} + +/* **** lock constructor **** */ + + +static long cedf_allocate_lock(struct litmus_lock **lock, int type, + void * __user unused) +{ + int err = -ENXIO; + + /* C-EDF currently supports the M-BWI protocol for ? resources. */ + switch (type) { + case BWI_SEM: + /* Multiprocessor BandWidth Inheritance Protocol */ + *lock = cedf_new_bwi(); + if (*lock) + err = 0; + else + err = -ENOMEM; + break; + }; + + return err; + +} + +#endif + + +static void kthread_set_litmus(void) +{ + struct sched_param param = { .sched_priority = 0}; + struct rt_task tp = { + .exec_cost = 0, + .period = 1000000000, + .phase = 0, + .cpu = task_cpu(current), + .budget_policy = NO_ENFORCEMENT, + .budget_action = COMPLETE_JOB, + .cls = RT_CLASS_BEST_EFFORT}; + + /*INIT_LIST_HEAD(&tp.proxies); + INIT_LIST_HEAD(&tp.proxies_entry); + + tp.scheduled_on = NO_CPU; + tp.linked_on = NO_CPU;*/ + + /* transition to SCHED_LITMUS */ + tsk_rt(current)->task_params = tp; + sched_setscheduler_nocheck(current, SCHED_LITMUS, ¶m); + +} + +static int pe_stub_thread(void *data) +{ + struct sched_param param = { .sched_priority = 0 }; + pe_data_t *pe_data = data; + unsigned long flags; + int ret = 0; + + TRACE_CUR("starting, this is happening on P%d, right?\n", pe_data->cpu); + + kthread_set_litmus(); + BUG_ON(!is_realtime(current)); + + set_current_state(TASK_INTERRUPTIBLE); + + while (!kthread_should_stop()) { + cedf_domain_t *cluster = task_cpu_cluster(current); + struct task_struct *my_proxy, *spin_on; + cpu_entry_t *entry; + + /* Mmm... stub running and not being proxied? + * Something is wrong, let's tell it loudly. 
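 * cedf_schedule() installs pe_data->spin_on and the stub's proxied_by
 * pointer together when it parks a proxy behind this stub, so spin_on
 * being set while nobody is proxying the stub suggests the hand-off was
 * torn down only half-way.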
+ */ + WARN_ON(!is_proxied(current) && pe_data->spin_on != NULL); + //WARN_ON(pe_data->spin_on && !is_realtime(pe_data->spin_on)); + if (pe_data->spin_on && !is_realtime(pe_data->spin_on)) { + printk(KERN_INFO "%d is not realtime!", pe_data->spin_on->pid); + WARN_ON(1); + } + + while (!pe_data->spin_on) { + TRACE_CUR("nothing to do, going to sleep. Was that expected?\n"); + __set_current_state(TASK_INTERRUPTIBLE); + + schedule(); + + if (kthread_should_stop()) + goto killed; + } + + __set_current_state(TASK_RUNNING); + + my_proxy = tsk_rt(current)->proxied_by; + spin_on = pe_data->spin_on; + + while (spin_on == pe_data->spin_on && is_realtime(spin_on) && + my_proxy == tsk_rt(current)->proxied_by && + tsk_rt(spin_on)->scheduled_on != NO_CPU && + tsk_rt(my_proxy)->proxying_for == current) + cpu_relax(); + + raw_spin_lock_irqsave(&cluster->cedf_lock, flags); + + entry = container_of(pe_data, cpu_entry_t, pe_data); + + BUG_ON(pe_data->cpu != smp_processor_id()); + BUG_ON(entry->cpu != pe_data->cpu); + + TRACE_CUR("%s/%d no longer running? let's give it a try!\n", + spin_on->comm, spin_on->pid); + + /* Recheck with the lock on the cluster!! */ + if (spin_on == pe_data->spin_on && + my_proxy == tsk_rt(current)->proxied_by && + (tsk_rt(spin_on)->scheduled_on == NO_CPU || + tsk_rt(my_proxy)->proxying_for != current || + !is_realtime(spin_on))) { + /* proxying_for may have changed if we became the + * owner of the lock or something happened on the + * blocking chain, so make sure we do not miss such + * events by updating the cached proxying_for with + * the new one and let cedf_schedule() put things + * right. + */ + //pe_data->old_proxying_for = tsk_rt(my_proxy)->proxying_for; + + raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); + + schedule(); + } else + raw_spin_unlock_irqrestore(&cluster->cedf_lock, flags); + } + +killed: + TRACE_CUR("proxy stab terminating on P%d\n", pe_data->cpu); + + sched_setscheduler_nocheck(current, SCHED_NORMAL, ¶m); + __set_current_state(TASK_RUNNING); + + return ret; +} + +static void run_stub(struct work_struct *work) +{ + pe_data_t *pe_data = container_of(work, pe_data_t, work); + cpu_entry_t *entry = container_of(pe_data, cpu_entry_t, pe_data); + + entry->pe_stub = kthread_create(pe_stub_thread, pe_data, "pe_stub-%d", pe_data->cpu); + + kthread_bind(entry->pe_stub, pe_data->cpu); + + TRACE("waking up proxy stub for P%d\n", pe_data->cpu); + wake_up_process(entry->pe_stub); +} + + static long cedf_activate_plugin(void) { int i, j, cpu, ccpu, cpu_count; @@ -741,6 +1351,12 @@ static long cedf_activate_plugin(void) entry->linked = NULL; entry->scheduled = NULL; update_cpu_position(entry); + + entry->pe_data.cpu = ccpu; + entry->pe_data.spin_on = NULL; + entry->pe_stub = NULL; + INIT_WORK(&entry->pe_data.work, run_stub); + schedule_work(&entry->pe_data.work); } /* done with this cluster */ break; @@ -752,6 +1368,28 @@ static long cedf_activate_plugin(void) return 0; } +static long cedf_pre_deactivate_plugin(void) +{ + int i, ccpu, ret = 0; + + for (i = 0; i < num_clusters; i++) { + for_each_cpu(ccpu, cedf[i].cpu_map) { + cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, ccpu); + + /* ensure the proxy stub will wake up and check for + * the should_stop flag. 
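 * kthread_stop() below then blocks until pe_stub_thread() observes
 * kthread_should_stop(), exits through its "killed" label and drops
 * back to SCHED_NORMAL before returning.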
+ */ + entry->pe_data.spin_on = NULL; + set_task_state(entry->pe_stub, TASK_INTERRUPTIBLE); + //unlink(entry->pe_stub); + + ret = kthread_stop(entry->pe_stub); + } + } + + return ret; +} + /* Plugin object */ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { .plugin_name = "C-EDF", @@ -765,6 +1403,10 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { .task_block = cedf_task_block, .admit_task = cedf_admit_task, .activate_plugin = cedf_activate_plugin, + .pre_deactivate_plugin = cedf_pre_deactivate_plugin, +#ifdef CONFIG_LITMUS_LOCKING + .allocate_lock = cedf_allocate_lock, +#endif }; static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index e695289..d84d839 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -7,9 +7,16 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p) { - u64 delta = rq->clock - p->se.exec_start; + u64 delta; + + //if (is_realtime(p)) + WARN_ON(!is_realtime(p)); + p = get_proxied(p); + + delta = rq->clock - p->se.exec_start; if (unlikely((s64)delta < 0)) delta = 0; + /* per job counter */ p->rt_param.job_params.exec_time += delta; /* task counter */ diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d54886d..c938970 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -21,8 +21,8 @@ */ void preempt_if_preemptable(struct task_struct* t, int cpu) { - /* t is the real-time task executing on CPU on_cpu If t is NULL, then - * on_cpu is currently scheduling background work. + /* t is the real-time task executing on CPU cpu If t is NULL, then + * cpu is currently scheduling background work. */ int reschedule = 0; @@ -116,6 +116,11 @@ static long litmus_dummy_activate_plugin(void) return 0; } +static long litmus_dummy_pre_deactivate_plugin(void) +{ + return 0; +} + static long litmus_dummy_deactivate_plugin(void) { return 0; @@ -146,6 +151,7 @@ struct sched_plugin linux_sched_plugin = { .schedule = litmus_dummy_schedule, .finish_switch = litmus_dummy_finish_switch, .activate_plugin = litmus_dummy_activate_plugin, + .pre_deactivate_plugin = litmus_dummy_pre_deactivate_plugin, .deactivate_plugin = litmus_dummy_deactivate_plugin, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = litmus_dummy_allocate_lock, @@ -184,6 +190,7 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(task_new); CHECK(complete_job); CHECK(activate_plugin); + CHECK(pre_deactivate_plugin); CHECK(deactivate_plugin); #ifdef CONFIG_LITMUS_LOCKING CHECK(allocate_lock); diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 5ef8d09..3080b15 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -192,6 +192,17 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, } } +feather_callback void do_sched_trace_task_postponement(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record *rec = get_record(ST_POSTPONEMENT, t); + if (rec) { + rec->data.postponement.when = now(); + put_record(rec); + } +} + feather_callback void do_sched_trace_task_block(unsigned long id, unsigned long _task) {