--- zzzz-none-000/linux-3.10.107/include/linux/rcutiny.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/rcutiny.h	2021-02-04 17:41:59.000000000 +0000
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
@@ -27,8 +27,24 @@
 
 #include <linux/cache.h>
 
-static inline void rcu_init(void)
+static inline unsigned long get_state_synchronize_rcu(void)
 {
+	return 0;
+}
+
+static inline void cond_synchronize_rcu(unsigned long oldstate)
+{
+	might_sleep();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+	return 0;
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+	might_sleep();
 }
 
 static inline void rcu_barrier_bh(void)
@@ -41,8 +57,6 @@
 	wait_rcu_gp(call_rcu_sched);
 }
 
-#ifdef CONFIG_TINY_RCU
-
 static inline void synchronize_rcu_expedited(void)
 {
 	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
@@ -53,17 +67,6 @@
 	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
 }
 
-#else /* #ifdef CONFIG_TINY_RCU */
-
-void synchronize_rcu_expedited(void);
-
-static inline void rcu_barrier(void)
-{
-	wait_rcu_gp(call_rcu);
-}
-
-#endif /* #else #ifdef CONFIG_TINY_RCU */
-
 static inline void synchronize_rcu_bh(void)
 {
 	synchronize_sched();
@@ -80,62 +83,68 @@
 }
 
 static inline void kfree_call_rcu(struct rcu_head *head,
-				  void (*func)(struct rcu_head *rcu))
+				  rcu_callback_t func)
 {
 	call_rcu(head, func);
 }
 
-#ifdef CONFIG_TINY_RCU
+static inline void rcu_note_context_switch(void)
+{
+	rcu_sched_qs();
+}
 
-static inline void rcu_preempt_note_context_switch(void)
+/*
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
 {
 }
 
-static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+/*
+ * Return the number of grace periods started.
+ */
+static inline unsigned long rcu_batches_started(void)
 {
-	*delta_jiffies = ULONG_MAX;
 	return 0;
 }
 
-#else /* #ifdef CONFIG_TINY_RCU */
-
-void rcu_preempt_note_context_switch(void);
-int rcu_preempt_needs_cpu(void);
-
-static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+/*
+ * Return the number of bottom-half grace periods started.
+ */
+static inline unsigned long rcu_batches_started_bh(void)
 {
-	*delta_jiffies = ULONG_MAX;
-	return rcu_preempt_needs_cpu();
+	return 0;
 }
 
-#endif /* #else #ifdef CONFIG_TINY_RCU */
-
-static inline void rcu_note_context_switch(int cpu)
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
 {
-	rcu_sched_qs(cpu);
-	rcu_preempt_note_context_switch();
+	return 0;
 }
 
 /*
- * Take advantage of the fact that there is only one CPU, which
- * allows us to ignore virtualization-based context switches.
+ * Return the number of grace periods completed.
  */
-static inline void rcu_virt_note_context_switch(int cpu)
+static inline unsigned long rcu_batches_completed(void)
 {
+	return 0;
 }
 
 /*
- * Return the number of grace periods.
+ * Return the number of bottom-half grace periods completed.
  */
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_completed_bh(void)
 {
 	return 0;
 }
 
 /*
- * Return the number of bottom-half grace periods.
+ * Return the number of sched grace periods completed.
 */
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_completed_sched(void)
 {
 	return 0;
 }
@@ -152,17 +161,62 @@
 {
 }
 
+static inline void show_rcu_gp_kthreads(void)
+{
+}
+
 static inline void rcu_cpu_stall_reset(void)
 {
 }
 
+static inline void rcu_idle_enter(void)
+{
+}
+
+static inline void rcu_idle_exit(void)
+{
+}
+
+static inline void rcu_irq_enter(void)
+{
+}
+
+static inline void rcu_irq_exit(void)
+{
+}
+
+static inline void exit_rcu(void)
+{
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern int rcu_scheduler_active __read_mostly;
-extern void rcu_scheduler_starting(void);
+void rcu_scheduler_starting(void);
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 static inline void rcu_scheduler_starting(void)
 {
 }
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+	return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+	return true;
+}
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline void rcu_all_qs(void)
+{
+	barrier(); /* Avoid RCU read-side critical sections leaking across. */
+}
+
 #endif /* __LINUX_RCUTINY_H */
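
Usage note (not part of the patch): the most visible addition above is the
polled grace-period API, get_state_synchronize_rcu() and
cond_synchronize_rcu(), plus their _sched variants. An updater snapshots
the grace-period state, does other work, and later blocks only if a full
grace period has not already elapsed since the snapshot. The sketch below
is a minimal illustration of that calling pattern, not code from the patch;
struct foo, global_foo, and update_foo() are hypothetical, and the caller
is assumed to hold the update-side lock.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
	};

	static struct foo __rcu *global_foo;

	/* Replace the global copy; hypothetical caller holds the update-side lock. */
	static void update_foo(struct foo *newp)
	{
		struct foo *oldp;
		unsigned long gp_state;

		oldp = rcu_dereference_protected(global_foo, 1);
		rcu_assign_pointer(global_foo, newp);

		/* Snapshot the grace-period state right after publishing. */
		gp_state = get_state_synchronize_rcu();

		/* ... unrelated work that may itself span a grace period ... */

		/* Block only if a grace period has not elapsed since the snapshot. */
		cond_synchronize_rcu(gp_state);
		kfree(oldp);
	}

In the Tiny RCU build patched here, the snapshot is always 0 and
cond_synchronize_rcu() reduces to might_sleep(): with only one CPU, any
point at which the updater may block already implies that all pre-existing
readers have finished, the same single-CPU argument the file's own comments
make for synchronize_rcu_expedited() and rcu_barrier().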