--- zzzz-none-000/linux-4.1.38/include/linux/sched.h	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/include/linux/sched.h	2020-11-25 10:06:48.000000000 +0000
@@ -337,9 +337,9 @@
 extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(int pinned);
 #else
-static inline void nohz_balance_enter_idle(int cpu) { }
+static inline void nohz_balance_enter_idle(int cpu __attribute__((unused))) { }
 static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
+static inline int get_nohz_timer_target(int pinned __maybe_unused)
 {
 	return smp_processor_id();
 }
@@ -1085,12 +1085,12 @@
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-			struct sched_domain_attr *dattr_new)
+partition_sched_domains(int ndoms_new __attribute__((unused)), cpumask_var_t doms_new[] __attribute__((unused)),
+			struct sched_domain_attr *dattr_new __attribute__((unused)))
 {
 }
 
-static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+static inline bool cpus_share_cache(int this_cpu __attribute__((unused)), int that_cpu __attribute__((unused)))
 {
 	return true;
 }
@@ -1104,7 +1104,7 @@
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
 #else
-static inline void prefetch_stack(struct task_struct *t) { }
+static inline void prefetch_stack(struct task_struct *t __maybe_unused) { }
 #endif
 
 struct audit_context;		/* See audit.c */
@@ -1745,22 +1745,22 @@
 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
 					int src_nid, int dst_cpu);
 #else
-static inline void task_numa_fault(int last_node, int node, int pages,
-				   int flags)
+static inline void task_numa_fault(int last_node __maybe_unused, int node __maybe_unused, int pages __maybe_unused,
+				   int flags __maybe_unused)
 {
 }
-static inline pid_t task_numa_group_id(struct task_struct *p)
+static inline pid_t task_numa_group_id(struct task_struct *p __maybe_unused)
 {
 	return 0;
 }
-static inline void set_numabalancing_state(bool enabled)
+static inline void set_numabalancing_state(bool enabled __maybe_unused)
 {
 }
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p __maybe_unused)
 {
 }
-static inline bool should_numa_migrate_memory(struct task_struct *p,
-				struct page *page, int src_nid, int dst_cpu)
+static inline bool should_numa_migrate_memory(struct task_struct *p __maybe_unused,
+				struct page *page __maybe_unused, int src_nid __maybe_unused, int dst_cpu __maybe_unused)
 {
 	return true;
 }
@@ -2129,11 +2129,11 @@
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
 #else
-static inline void do_set_cpus_allowed(struct task_struct *p,
-				      const struct cpumask *new_mask)
+static inline void do_set_cpus_allowed(struct task_struct *p __attribute__((unused)),
+				      const struct cpumask *new_mask __attribute__((unused)))
 {
 }
-static inline int set_cpus_allowed_ptr(struct task_struct *p,
+static inline int set_cpus_allowed_ptr(struct task_struct *p __attribute__((unused)),
 				       const struct cpumask *new_mask)
 {
 	if (!cpumask_test_cpu(0, new_mask))
@@ -2186,7 +2186,7 @@
 {
 }
 
-static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns __maybe_unused)
 {
 }
 #else
@@ -2240,7 +2240,7 @@
 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
 extern void wake_up_nohz_cpu(int cpu);
 #else
-static inline void wake_up_nohz_cpu(int cpu) { }
+static inline void wake_up_nohz_cpu(int cpu __attribute__((unused))) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -2260,10 +2260,14 @@
 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
-static inline void sched_autogroup_create_attach(struct task_struct *p) { }
-static inline void sched_autogroup_detach(struct task_struct *p) { }
-static inline void sched_autogroup_fork(struct signal_struct *sig) { }
-static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_create_attach(struct task_struct *p
+						 __maybe_unused) { }
+static inline void sched_autogroup_detach(struct task_struct *p
+					  __maybe_unused) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig
+					__maybe_unused) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig
+					__maybe_unused) { }
 #endif
 
 extern int yield_to(struct task_struct *p, bool preempt);
@@ -2360,7 +2364,7 @@
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
 #else
- static inline void kick_process(struct task_struct *tsk) { }
+ static inline void kick_process(struct task_struct *tsk __attribute__((unused))) { }
 #endif
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
@@ -2527,8 +2531,8 @@
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
 static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p,
-					       long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p __attribute__((unused)),
+					       long match_state __attribute__((unused)))
 {
 	return 1;
 }
@@ -2684,10 +2688,12 @@
 	up_write(&tsk->signal->group_rwsem);
 }
 #else
-static inline void threadgroup_change_begin(struct task_struct *tsk) {}
-static inline void threadgroup_change_end(struct task_struct *tsk) {}
-static inline void threadgroup_lock(struct task_struct *tsk) {}
-static inline void threadgroup_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk
+					    __maybe_unused) {}
+static inline void threadgroup_change_end(struct task_struct *tsk
+					  __maybe_unused) {}
+static inline void threadgroup_lock(struct task_struct *tsk __maybe_unused) {}
+static inline void threadgroup_unlock(struct task_struct *tsk __maybe_unused) {}
 #endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
@@ -2862,7 +2868,7 @@
  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
  * but a general need for low latency)
  */
-static inline int spin_needbreak(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock __maybe_unused)
 {
 #ifdef CONFIG_PREEMPT
 	return spin_is_contended(lock);
@@ -2918,7 +2924,7 @@
 }
 
 #else
-static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline int tsk_is_polling(struct task_struct *p __maybe_unused) { return 0; }
 
 static inline void __current_set_polling(void) { }
 static inline void __current_clr_polling(void) { }
@@ -3002,12 +3008,12 @@
 
 #else
 
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p __attribute__((unused)))
 {
 	return 0;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+static inline void set_task_cpu(struct task_struct *p __attribute__((unused)), unsigned int cpu __attribute__((unused)))
 {
 }
 
@@ -3044,19 +3050,21 @@
 	tsk->ioac.syscw++;
 }
 #else
-static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+static inline void add_rchar(struct task_struct *tsk __maybe_unused,
+			     ssize_t amt __maybe_unused)
 {
 }
 
-static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
+static inline void add_wchar(struct task_struct *tsk __maybe_unused,
+			     ssize_t amt __maybe_unused)
 {
 }
 
-static inline void inc_syscr(struct task_struct *tsk)
+static inline void inc_syscr(struct task_struct *tsk __maybe_unused)
 {
 }
 
-static inline void inc_syscw(struct task_struct *tsk)
+static inline void inc_syscw(struct task_struct *tsk __maybe_unused)
 {
 }
 #endif
@@ -3068,7 +3076,7 @@
 #ifdef CONFIG_MEMCG
 extern void mm_update_next_owner(struct mm_struct *mm);
 #else
-static inline void mm_update_next_owner(struct mm_struct *mm)
+static inline void mm_update_next_owner(struct mm_struct *mm __maybe_unused)
 {
 }
 #endif /* CONFIG_MEMCG */