--- zzzz-none-000/linux-3.10.107/include/linux/sched.h	2017-06-27 09:49:32.000000000 +0000
+++ vr9-7490-729/linux-3.10.107/include/linux/sched.h	2021-11-10 11:53:56.000000000 +0000
@@ -236,7 +236,7 @@
 extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
 #else
-static inline void nohz_balance_enter_idle(int cpu) { }
+static inline void nohz_balance_enter_idle(int cpu __attribute__((unused))) { }
 static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
@@ -897,12 +897,12 @@
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-			struct sched_domain_attr *dattr_new)
+partition_sched_domains(int ndoms_new __attribute__((unused)), cpumask_var_t doms_new[] __attribute__((unused)),
+			struct sched_domain_attr *dattr_new __attribute__((unused)))
 {
 }
 
-static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+static inline bool cpus_share_cache(int this_cpu __attribute__((unused)), int that_cpu __attribute__((unused)))
 {
 	return true;
 }
@@ -916,7 +916,7 @@
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
 #else
-static inline void prefetch_stack(struct task_struct *t) { }
+static inline void prefetch_stack(struct task_struct *t __maybe_unused) { }
 #endif
 
 struct audit_context;		/* See audit.c */
@@ -1439,10 +1439,12 @@
 extern void task_numa_fault(int node, int pages, bool migrated);
 extern void set_numabalancing_state(bool enabled);
 #else
-static inline void task_numa_fault(int node, int pages, bool migrated)
+static inline void task_numa_fault(int node __maybe_unused,
+				   int pages __maybe_unused,
+				   bool migrated __maybe_unused)
 {
 }
-static inline void set_numabalancing_state(bool enabled)
+static inline void set_numabalancing_state(bool enabled __maybe_unused)
 {
 }
 #endif
@@ -1750,7 +1752,7 @@
 
 #else
 
-static inline void rcu_copy_process(struct task_struct *p)
+static inline void rcu_copy_process(struct task_struct *p __maybe_unused)
 {
 }
 
@@ -1770,11 +1772,11 @@
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
 #else
-static inline void do_set_cpus_allowed(struct task_struct *p,
-				      const struct cpumask *new_mask)
+static inline void do_set_cpus_allowed(struct task_struct *p __attribute__((unused)),
+				      const struct cpumask *new_mask __attribute__((unused)))
 {
 }
-static inline int set_cpus_allowed_ptr(struct task_struct *p,
+static inline int set_cpus_allowed_ptr(struct task_struct *p __attribute__((unused)),
 				       const struct cpumask *new_mask)
 {
 	if (!cpumask_test_cpu(0, new_mask))
@@ -1826,7 +1828,7 @@
 {
 }
 
-static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns __maybe_unused)
 {
 }
 #else
@@ -1878,7 +1880,7 @@
 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
 extern void wake_up_nohz_cpu(int cpu);
 #else
-static inline void wake_up_nohz_cpu(int cpu) { }
+static inline void wake_up_nohz_cpu(int cpu __attribute__((unused))) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -1898,10 +1900,14 @@
 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
-static inline void sched_autogroup_create_attach(struct task_struct *p) { }
-static inline void sched_autogroup_detach(struct task_struct *p) { }
-static inline void sched_autogroup_fork(struct signal_struct *sig) { }
-static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_create_attach(struct task_struct *p
+						 __maybe_unused) { }
+static inline void sched_autogroup_detach(struct task_struct *p
+					  __maybe_unused) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig
+					__maybe_unused) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig
+					__maybe_unused) { }
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
@@ -1992,7 +1998,7 @@
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
 #else
- static inline void kick_process(struct task_struct *tsk) { }
+ static inline void kick_process(struct task_struct *tsk __attribute__((unused))) { }
 #endif
 extern void sched_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
@@ -2154,8 +2160,8 @@
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
 static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p,
-					       long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p __attribute__((unused)),
+					       long match_state __attribute__((unused)))
 {
 	return 1;
 }
@@ -2311,10 +2317,12 @@
 	up_write(&tsk->signal->group_rwsem);
 }
 #else
-static inline void threadgroup_change_begin(struct task_struct *tsk) {}
-static inline void threadgroup_change_end(struct task_struct *tsk) {}
-static inline void threadgroup_lock(struct task_struct *tsk) {}
-static inline void threadgroup_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk
+					     __maybe_unused) {}
+static inline void threadgroup_change_end(struct task_struct *tsk
+					   __maybe_unused) {}
+static inline void threadgroup_lock(struct task_struct *tsk __maybe_unused) {}
+static inline void threadgroup_unlock(struct task_struct *tsk __maybe_unused) {}
 #endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
@@ -2475,7 +2483,7 @@
  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
  * but a general need for low latency)
  */
-static inline int spin_needbreak(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock __maybe_unused)
 {
 #ifdef CONFIG_PREEMPT
 	return spin_is_contended(lock);
@@ -2575,7 +2583,10 @@
 }
 
 #else
-static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline int tsk_is_polling(struct task_struct *p __maybe_unused)
+{
+	return 0;
+}
 static inline void __current_set_polling(void) { }
 static inline void __current_clr_polling(void) { }
 
@@ -2634,12 +2645,12 @@
 
 #else
 
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p __attribute__((unused)))
 {
 	return 0;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+static inline void set_task_cpu(struct task_struct *p __attribute__((unused)), unsigned int cpu __attribute__((unused)))
 {
 }
 
@@ -2676,19 +2687,21 @@
 	tsk->ioac.syscw++;
 }
 #else
-static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+static inline void add_rchar(struct task_struct *tsk __maybe_unused,
+			     ssize_t amt __maybe_unused)
 {
 }
 
-static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
+static inline void add_wchar(struct task_struct *tsk __maybe_unused,
+			     ssize_t amt __maybe_unused)
 {
 }
 
-static inline void inc_syscr(struct task_struct *tsk)
+static inline void inc_syscr(struct task_struct *tsk __maybe_unused)
 {
 }
 
-static inline void inc_syscw(struct task_struct *tsk)
+static inline void inc_syscw(struct task_struct *tsk __maybe_unused)
 {
 }
 #endif
@@ -2701,11 +2714,12 @@
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
 #else
-static inline void mm_update_next_owner(struct mm_struct *mm)
+static inline void mm_update_next_owner(struct mm_struct *mm __maybe_unused)
 {
 }
 
-static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+static inline void mm_init_owner(struct mm_struct *mm __maybe_unused,
+				 struct task_struct *p __maybe_unused)
 {
 }
 #endif /* CONFIG_MM_OWNER */
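
Below is a minimal, self-contained sketch (not part of the patch above) of the annotation pattern the diff applies throughout: when a configuration option is disabled, the no-op inline stubs keep their parameters, so each parameter is marked unused to keep -Wunused-parameter quiet under -Werror. The HAVE_REAL_IMPL macro and example_stub() are illustrative assumptions, not kernel identifiers; in the kernel, __maybe_unused is provided via <linux/compiler.h>.

/* Stand-in for the kernel's definition; __maybe_unused expands to the GCC attribute. */
#define __maybe_unused __attribute__((unused))

#ifdef HAVE_REAL_IMPL
extern void example_stub(int cpu);	/* real implementation lives elsewhere */
#else
/* No-op stub: the parameter stays in the prototype but is never read. */
static inline void example_stub(int cpu __maybe_unused) { }
#endif

int main(void)
{
	example_stub(0);	/* builds cleanly with -Wall -Wextra -Werror */
	return 0;
}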