--- zzzz-none-000/linux-2.6.13.1/net/core/dev.c 2005-09-10 02:42:58.000000000 +0000 +++ ohio-7170-487/linux-2.6.13.1/net/core/dev.c 2007-12-20 13:56:31.000000000 +0000 @@ -146,6 +146,8 @@ static DEFINE_SPINLOCK(ptype_lock); static struct list_head ptype_base[16]; /* 16 way hashed list */ static struct list_head ptype_all; /* Taps */ +static int (*avm_recvhook)(struct sk_buff *skb); +static int (*avm_early_recvhook)(struct sk_buff *skb); /* * The @dev_base list is protected by @dev_base_lock and the rtln @@ -217,6 +219,20 @@ *******************************************************************************/ +void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb)) +{ + spin_lock_bh(&ptype_lock); + avm_recvhook = recvhook; + spin_unlock_bh(&ptype_lock); +} + +void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb)) +{ + spin_lock_bh(&ptype_lock); + avm_early_recvhook = recvhook; + spin_unlock_bh(&ptype_lock); +} + /* * For efficiency */ @@ -247,7 +263,7 @@ * is linked into kernel lists and may not be freed until it has been * removed from the kernel lists. * - * This call does not sleep therefore it can not + * This call does not sleep therefore it can not * guarantee all CPU's that are in middle of receiving packets * will see the new packet type (until the next received packet). */ @@ -278,7 +294,7 @@ * Remove a protocol handler that was previously added to the kernel * protocol handlers by dev_add_pack(). The passed &packet_type is removed * from the kernel lists and can be freed or reused once this function - * returns. + * returns. * * The packet type might still be in use by receivers * and must not be freed until after all the CPU's have gone @@ -323,7 +339,7 @@ void dev_remove_pack(struct packet_type *pt) { __dev_remove_pack(pt); - + synchronize_net(); } @@ -601,7 +617,7 @@ * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device - * is not found or a pointer to the device. 
The device returned has + * is not found or a pointer to the device. The device returned has * had a reference added and the pointer is safe until the user calls * dev_put to indicate they have finished with it. */ @@ -630,7 +646,7 @@ */ static int dev_valid_name(const char *name) { - return !(*name == '\0' + return !(*name == '\0' || !strcmp(name, ".") || !strcmp(name, "..") || strchr(name, '/')); @@ -782,7 +798,7 @@ void dev_load(const char *name) { - struct net_device *dev; + struct net_device *dev; read_lock(&dev_base_lock); dev = __dev_get_by_name(name); @@ -944,7 +960,7 @@ * is returned on a failure. * * When registered all registration and up events are replayed - * to the new notifier to allow device to have a race free + * to the new notifier to allow device to have a race free * view of the network device list. */ @@ -959,7 +975,7 @@ for (dev = dev_base; dev; dev = dev->next) { nb->notifier_call(nb, NETDEV_REGISTER, dev); - if (dev->flags & IFF_UP) + if (dev->flags & IFF_UP) nb->notifier_call(nb, NETDEV_UP, dev); } } @@ -1096,7 +1112,7 @@ *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; -out: +out: return ret; } @@ -1229,6 +1245,8 @@ struct Qdisc *q; int rc = -ENOMEM; + skb_trace(skb, 18); + if (skb_shinfo(skb)->frag_list && !(dev->features & NETIF_F_FRAGLIST) && __skb_linearize(skb, GFP_ATOMIC)) @@ -1253,19 +1271,19 @@ if (skb_checksum_help(skb, 0)) goto out_kfree_skb; - /* Disable soft irqs for various locks below. Also - * stops preemption for RCU. + /* Disable soft irqs for various locks below. Also + * stops preemption for RCU. */ - local_bh_disable(); + local_bh_disable(); - /* Updates of qdisc are serialized by queue_lock. - * The struct Qdisc which is pointed to by qdisc is now a - * rcu structure - it may be accessed without acquiring + /* Updates of qdisc are serialized by queue_lock. 
+ * The struct Qdisc which is pointed to by qdisc is now a + * rcu structure - it may be accessed without acquiring * a lock (but the structure may be stale.) The freeing of the - * qdisc will be deferred until it's known that there are no + * qdisc will be deferred until it's known that there are no * more references to it. - * - * If the qdisc has an enqueue function, we still need to + * + * If the qdisc has an enqueue function, we still need to * hold the queue_lock before calling it, since queue_lock * also serializes access to the device queue. */ @@ -1375,6 +1393,8 @@ struct softnet_data *queue; unsigned long flags; + skb_trace(skb, 10); + /* if netpoll wants it, pretend we never saw it */ if (netpoll_rx(skb)) return NET_RX_DROP; @@ -1505,10 +1525,12 @@ return 0; if (*pt_prev) { + skb_trace(*pskb, 11); *ret = deliver_skb(*pskb, *pt_prev); *pt_prev = NULL; - } - + } + + skb_trace(*pskb, 12); return br_handle_frame_hook(port, pskb); } #else @@ -1520,16 +1542,16 @@ * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions * a compare and 2 stores extra right now if we dont have it on * but have CONFIG_NET_CLS_ACT - * NOTE: This doesnt stop any functionality; if you dont have + * NOTE: This doesnt stop any functionality; if you dont have * the ingress scheduler, you just cant add policies on ingress. 
 * */ -static int ing_filter(struct sk_buff *skb) +static int ing_filter(struct sk_buff *skb) { struct Qdisc *q; struct net_device *dev = skb->dev; int result = TC_ACT_OK; - + if (dev->qdisc_ingress) { __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); if (MAX_RED_LOOP < ttl++) { @@ -1589,8 +1611,10 @@ list_for_each_entry_rcu(ptype, &ptype_all, list) { if (!ptype->dev || ptype->dev == skb->dev) { - if (pt_prev) + if (pt_prev) { + skb_trace(skb, 11); ret = deliver_skb(skb, pt_prev); + } pt_prev = ptype; } } @@ -1614,22 +1638,41 @@ ncls: #endif + if (avm_early_recvhook && (*avm_early_recvhook)(skb)) { + /* + * packet consumed by hook + */ + ret = NET_RX_SUCCESS; + goto out; + } + handle_diverter(skb); if (handle_bridge(&skb, &pt_prev, &ret)) goto out; + if (avm_recvhook && (*avm_recvhook)(skb)) { + /* + * packet consumed by hook + */ + ret = NET_RX_SUCCESS; + goto out; + } + type = skb->protocol; list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) { if (ptype->type == type && (!ptype->dev || ptype->dev == skb->dev)) { - if (pt_prev) + if (pt_prev) { + skb_trace(skb, 13); ret = deliver_skb(skb, pt_prev); + } pt_prev = ptype; } } if (pt_prev) { + skb_trace(skb, 14); ret = pt_prev->func(skb, skb->dev, pt_prev); } else { kfree_skb(skb); @@ -2060,7 +2103,7 @@ } slave->master = master; - + synchronize_net(); if (old) @@ -2130,7 +2173,7 @@ flags = (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI | - IFF_RUNNING)) | + IFF_RUNNING)) | (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI)); @@ -2668,7 +2711,7 @@ goto out_err; } } - + if (!dev_valid_name(dev->name)) { ret = -EINVAL; goto out_err; } @@ -2775,7 +2818,7 @@ if (err < 0) goto out; } - + /* * Back compatibility hook. Kill this one in 2.5 */ @@ -2801,7 +2844,7 @@ * for netdevice notification, and cleanup and put back the * reference if they receive an UNREGISTER event. * We can get stuck here if buggy protocols don't correctly - * call dev_put. + * call dev_put. 
*/ static void netdev_wait_allrefs(struct net_device *dev) { @@ -2887,7 +2930,7 @@ spin_lock(&net_todo_list_lock); list_splice_init(&net_todo_list, &list); spin_unlock(&net_todo_list_lock); - + while (!list_empty(&list)) { struct net_device *dev = list_entry(list.next, struct net_device, todo_list); @@ -2915,7 +2958,7 @@ BUG_TRAP(!dev->dn_ptr); - /* It must be the very last action, + /* It must be the very last action, * after this 'dev' may point to freed up memory. */ if (dev->destructor) @@ -2977,8 +3020,8 @@ * free_netdev - free network device * @dev: device * - * This function does the last stage of destroying an allocated device - * interface. The reference to the device object is released. + * This function does the last stage of destroying an allocated device + * interface. The reference to the device object is released. * If this is the last reference then it will be freed. */ void free_netdev(struct net_device *dev) @@ -2999,9 +3042,9 @@ kfree((char *)dev - dev->padded); #endif } - + /* Synchronize with packet receive processing. */ -void synchronize_net(void) +void synchronize_net(void) { might_sleep(); synchronize_rcu(); @@ -3065,12 +3108,12 @@ /* Shutdown queueing discipline. */ dev_shutdown(dev); - + /* Notify protocols, that we are about to destroy this device. They should clean all the things. */ notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); - + /* * Flush the multicast chain */ @@ -3187,7 +3230,7 @@ goto out; INIT_LIST_HEAD(&ptype_all); - for (i = 0; i < 16; i++) + for (i = 0; i < 16; i++) INIT_LIST_HEAD(&ptype_base[i]); for (i = 0; i < ARRAY_SIZE(dev_name_head); i++) @@ -3220,7 +3263,9 @@ hotcpu_notifier(dev_cpu_callback, 0); dst_init(); +#if defined(CONFIG_PROC_FS) dev_mcast_init(); +#endif /*--- #if defined(CONFIG_PROC_FS) ---*/ rc = 0; out: return rc; @@ -3275,3 +3320,6 @@ #endif EXPORT_PER_CPU_SYMBOL(softnet_data); + +EXPORT_SYMBOL(set_avm_recvhook); +EXPORT_SYMBOL(set_avm_early_recvhook);