--- zzzz-none-000/linux-4.1.52/net/ipv4/esp4.c	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/net/ipv4/esp4.c	2022-03-02 11:37:13.000000000 +0000
@@ -18,6 +18,11 @@
 #include <net/protocol.h>
 #include <net/udp.h>
 
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include <linux/blog.h>
+#endif
+
 struct esp_skb_cb {
 	struct xfrm_skb_cb xfrm;
 	void *tmp;
@@ -135,12 +140,34 @@
 	int sglists;
 	int seqhilen;
 	__be32 *seqhi;
-
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	u8 next_hdr;
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+	struct crypto_tfm *ctfm;
+	struct crypto_alg *calg;
+#endif
+#endif
 	/* skb is pure payload to encrypt */
 
 	aead = x->data;
 	alen = crypto_aead_authsize(aead);
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+	ctfm = crypto_aead_tfm(aead);
+	calg = ctfm->__crt_alg;
+	if (!(calg->cra_flags & CRYPTO_ALG_BLOG))
+	{
+		blog_skip(skb, blog_skip_reason_esp4_crypto_algo);
+	} else {
+		crypto_tfm_set_flags(ctfm, CRYPTO_TFM_REQ_MAY_BLOG);
+	}
+#else
+	blog_skip(skb, blog_skip_reason_esp4_spu_disabled);
+#endif
+#endif
 	tfclen = 0;
 	if (x->tfcpad) {
 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -194,6 +221,9 @@
 	} while (0);
 	tail[plen - 2] = plen - 2;
 	tail[plen - 1] = *skb_mac_header(skb);
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && (defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	next_hdr = *skb_mac_header(skb);
+#endif
 	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
@@ -239,10 +269,19 @@
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
 	sg_init_table(sg, nfrags);
+#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
+	err =
+#endif
 	skb_to_sgvec(skb, sg,
 		     esph->enc_data + crypto_aead_ivsize(aead) -
 		     skb->data, clen + alen);
 
+#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
+	if (unlikely(err < 0))
+		goto error;
+#endif
+
+
 	if ((x->props.flags & XFRM_STATE_ESN)) {
 		sg_init_table(asg, 3);
 		sg_set_buf(asg, &esph->spi, sizeof(__be32));
@@ -260,6 +299,25 @@
 			      ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
 	ESP_SKB_CB(skb)->tmp = tmp;
+
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	req->areq.data_offset = (unsigned char *)esph - skb->data;
+	req->areq.next_hdr = next_hdr;
+#else
+	/* ensure there is enough headroom and tailroom for HW info */
+	if ((skb_headroom(skb) < 12) ||
+	    (skb_tailroom(skb) < 16))
+	{
+		req->areq.alloc_buff_spu = 1;
+	}
+	else
+	{
+		req->areq.alloc_buff_spu = 0;
+	}
+	req->areq.headerLen = esph->enc_data + crypto_aead_ivsize(aead) - skb->data;
+#endif
+#endif
 	err = crypto_aead_givencrypt(req);
 	if (err == -EINPROGRESS)
 		goto error;
@@ -386,7 +444,27 @@
 	struct scatterlist *sg;
 	struct scatterlist *asg;
 	int err = -EINVAL;
-
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE)) && !(defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE))
+	int macLen;
+#endif
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+	struct crypto_tfm *ctfm;
+	struct crypto_alg *calg;
+
+	ctfm = crypto_aead_tfm(aead);
+	calg = ctfm->__crt_alg;
+
+	if (!(calg->cra_flags & CRYPTO_ALG_BLOG))
+	{
+		blog_skip(skb, blog_skip_reason_esp4_crypto_algo);
+	} else {
+		crypto_tfm_set_flags(ctfm, CRYPTO_TFM_REQ_MAY_BLOG);
+	}
+#else
+	blog_skip(skb, blog_skip_reason_esp4_spu_disabled);
+#endif
+#endif
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
 		goto out;
 
@@ -429,7 +507,15 @@
 	iv = esph->enc_data;
 
 	sg_init_table(sg, nfrags);
+#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
+	err =
+#endif
 	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
+	if (unlikely(err < 0))
+		goto out;
+#endif
+
 
 	if ((x->props.flags & XFRM_STATE_ESN)) {
 		sg_init_table(asg, 3);
@@ -444,6 +530,27 @@
 	aead_request_set_crypt(req, sg, sg, elen, iv);
 	aead_request_set_assoc(req, asg, assoclen);
 
+#if defined(CONFIG_BCM_KF_SPU) && (defined(CONFIG_BCM_SPU) || defined(CONFIG_BCM_SPU_MODULE))
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+	req->data_offset = 0;
+	req->next_hdr = 0;
+#else
+	/* ensure there is enough headroom and tailroom for HW info */
+	if ((skb->data >= skb_mac_header(skb)) &&
+	    (skb_headroom(skb) >= ((skb->data - skb_mac_header(skb)) + 12)) &&
+	    (skb_tailroom(skb) >= 16))
+	{
+		macLen = skb->data - skb_mac_header(skb);
+		req->alloc_buff_spu = 0;
+	}
+	else
+	{
+		macLen = 0;
+		req->alloc_buff_spu = 1;
+	}
+	req->headerLen = sizeof(*esph) + crypto_aead_ivsize(aead) + macLen;
+#endif
+#endif
 	err = crypto_aead_decrypt(req);
 	if (err == -EINPROGRESS)
 		goto out;