--- zzzz-none-000/linux-3.10.107/lib/idr.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/lib/idr.c	2021-02-04 17:41:59.000000000 +0000
@@ -18,12 +18,6 @@
  * pointer or what ever, we treat it as a (void *).  You can pass this
  * id to a user for him to pass back at a later time.  You then pass
  * that id to this code and it returns your pointer.
-
- * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
  */
 
 #ifndef TEST                        // to test in user space...
@@ -36,7 +30,6 @@
 #include <linux/idr.h>
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
-#include <linux/hardirq.h>
 
 #define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
@@ -151,7 +144,7 @@
 
 static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
-	if (idr->hint && idr->hint == p)
+	if (idr->hint == p)
 		RCU_INIT_POINTER(idr->hint, NULL);
 	call_rcu(&p->rcu_head, idr_layer_rcu_free);
 }
@@ -196,7 +189,7 @@
 	}
 }
 
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -207,7 +200,6 @@
 	}
 	return 1;
 }
-EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -374,20 +366,6 @@
 	idr_mark_full(pa, id);
 }
 
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
-	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
-	int rv;
-
-	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
-	if (rv < 0)
-		return rv == -ENOMEM ? -EAGAIN : rv;
-
-	idr_fill_slot(idp, ptr, rv, pa);
-	*id = rv;
-	return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -421,7 +399,7 @@
 	 * allocation guarantee.  Disallow usage from those contexts.
 	 */
 	WARN_ON_ONCE(in_interrupt());
-	might_sleep_if(gfp_mask & __GFP_WAIT);
+	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 
 	preempt_disable();
 
@@ -475,7 +453,7 @@
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int id;
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
+	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 
 	/* sanity checks */
 	if (WARN_ON_ONCE(start < 0))
@@ -524,9 +502,7 @@
 
 static void idr_remove_warning(int id)
 {
-	printk(KERN_WARNING
-	       "idr_remove called for id=%d which is not allocated.\n", id);
-	dump_stack();
+	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
 }
 
 static void sub_remove(struct idr *idp, int shift, int id)
@@ -550,7 +526,7 @@
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, p->bitmap))) {
 		__clear_bit(n, p->bitmap);
-		rcu_assign_pointer(p->ary[n], NULL);
+		RCU_INIT_POINTER(p->ary[n], NULL);
 		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
 			if (to_free)
@@ -579,6 +555,11 @@
 	if (id < 0)
 		return;
 
+	if (id > idr_max(idp->layers)) {
+		idr_remove_warning(id);
+		return;
+	}
+
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
 	    idp->top->ary[0]) {
@@ -596,20 +577,10 @@
 			bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
 		free_layer(idp, to_free);
 	}
-	while (idp->id_free_cnt >= MAX_IDR_FREE) {
-		p = get_from_free_list(idp);
-		/*
-		 * Note: we don't call the rcu callback here, since the only
-		 * layers that fall into the freelist are those that have been
-		 * preallocated.
-		 */
-		kmem_cache_free(idr_layer_cache, p);
-	}
-	return;
 }
 EXPORT_SYMBOL(idr_remove);
 
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
 	int bt_mask;
@@ -618,31 +589,31 @@
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
-	rcu_assign_pointer(idp->top, NULL);
+	*paa = idp->top;
+	RCU_INIT_POINTER(idp->top, NULL);
 	max = idr_max(idp->layers);
 
 	id = 0;
 	while (id >= 0 && id <= max) {
+		p = *paa;
 		while (n > IDR_BITS && p) {
 			n -= IDR_BITS;
-			*paa++ = p;
 			p = p->ary[(id >> n) & IDR_MASK];
+			*++paa = p;
 		}
 
 		bt_mask = id;
 		id += 1 << n;
 		/* Get the highest bit that the above add changed from 0->1. */
 		while (n < fls(id ^ bt_mask)) {
-			if (p)
-				free_layer(idp, p);
+			if (*paa)
+				free_layer(idp, *paa);
 			n += IDR_BITS;
-			p = *--paa;
+			--paa;
 		}
 	}
 	idp->layers = 0;
 }
-EXPORT_SYMBOL(__idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
@@ -654,7 +625,7 @@
  * idr_destroy().
  *
  * A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
  * free up the id mappings and cached idr_layers.
  */
 void idr_destroy(struct idr *idp)
@@ -721,15 +692,16 @@
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
-	p = rcu_dereference_raw(idp->top);
+	*paa = rcu_dereference_raw(idp->top);
 	max = idr_max(idp->layers);
 
 	id = 0;
 	while (id >= 0 && id <= max) {
+		p = *paa;
 		while (n > 0 && p) {
 			n -= IDR_BITS;
-			*paa++ = p;
 			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+			*++paa = p;
 		}
 
 		if (p) {
@@ -741,7 +713,7 @@
 		id += 1 << n;
 		while (n < fls(id)) {
 			n += IDR_BITS;
-			p = *--paa;
+			--paa;
 		}
 	}
 
@@ -769,17 +741,18 @@
 	int n, max;
 
 	/* find first ent */
-	p = rcu_dereference_raw(idp->top);
+	p = *paa = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 	n = (p->layer + 1) * IDR_BITS;
 	max = idr_max(p->layer + 1);
 
 	while (id >= 0 && id <= max) {
+		p = *paa;
 		while (n > 0 && p) {
 			n -= IDR_BITS;
-			*paa++ = p;
 			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+			*++paa = p;
 		}
 
 		if (p) {
@@ -797,7 +770,7 @@
 		id = round_up(id + 1, 1 << n);
 		while (n < fls(id)) {
 			n += IDR_BITS;
-			p = *--paa;
+			--paa;
 		}
 	}
 	return NULL;
@@ -827,10 +800,10 @@
 
 	p = idp->top;
 	if (!p)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-ENOENT);
 
 	if (id > idr_max(p->layer + 1))
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-ENOENT);
 
 	n = p->layer * IDR_BITS;
 	while ((n > 0) && p) {
@@ -869,6 +842,16 @@
 }
 EXPORT_SYMBOL(idr_init);
 
+static int idr_has_entry(int id, void *p, void *data)
+{
+	return 1;
+}
+
+bool idr_is_empty(struct idr *idp)
+{
+	return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
 
 /**
  * DOC: IDA description
@@ -1033,6 +1016,9 @@
 	int n;
 	struct ida_bitmap *bitmap;
 
+	if (idr_id > idr_max(ida->idr.layers))
+		goto err;
+
 	/* clear full bits while looking up the leaf idr_layer */
 	while ((shift > 0) && p) {
 		n = (idr_id >> shift) & IDR_MASK;
@@ -1048,7 +1034,7 @@
 	__clear_bit(n, p->bitmap);
 
 	bitmap = (void *)p->ary[n];
-	if (!test_bit(offset, bitmap->bitmap))
+	if (!bitmap || !test_bit(offset, bitmap->bitmap))
 		goto err;
 
 	/* update bitmap and remove it if empty */
@@ -1062,8 +1048,7 @@
 	return;
 
  err:
-	printk(KERN_WARNING
-	       "ida_remove called for id=%d which is not allocated.\n", id);
+	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
 }
 EXPORT_SYMBOL(ida_remove);
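Reviewer note (not part of the patch): the hunks above backport several upstream idr fixes into this vendor tree: the out-of-bounds paa walk fix, gfpflags_allow_blocking() instead of testing __GFP_WAIT, WARN() for bogus removals, bounds checks in idr_remove()/ida_remove(), -ENOENT from idr_replace() for missing ids, and the new idr_is_empty() helper. The sketch below shows how a caller might exercise the affected interfaces; my_idr, my_obj_register(), my_obj_swap() and my_obj_unregister() are hypothetical names, while the idr calls themselves are the standard API in this kernel.

#include <linux/idr.h>
#include <linux/err.h>

static DEFINE_IDR(my_idr);

/* Allocate an id >= 1 for @obj; GFP_KERNEL allows blocking, so this may sleep. */
static int my_obj_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);	/* end == 0: no upper limit */
	idr_preload_end();

	return id;	/* >= 1 on success, -ENOMEM or -ENOSPC on failure */
}

/* Swap the pointer stored under @id; a missing id now yields -ENOENT, not -EINVAL. */
static int my_obj_swap(int id, void *new_obj)
{
	void *old = idr_replace(&my_idr, new_obj, id);

	return IS_ERR(old) ? PTR_ERR(old) : 0;
}

static void my_obj_unregister(int id)
{
	idr_remove(&my_idr, id);	/* an out-of-range id now just WARN()s */

	if (idr_is_empty(&my_idr))	/* helper added by this patch */
		idr_destroy(&my_idr);	/* release cached idr_layers */
}

Following the usual idr_preload() convention, the allocation inside the preloaded section uses GFP_NOWAIT: the preload already bought the blocking allocation with GFP_KERNEL, so the inner idr_alloc() should not sleep.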