crypto: nx - fix limits to sg lists for AES-XCBC
author Fionnuala Gunter <fin@linux.vnet.ibm.com>
Thu, 29 Aug 2013 14:36:36 +0000 (11:36 -0300)
committer Herbert Xu <herbert@gondor.apana.org.au>
Mon, 2 Sep 2013 10:32:54 +0000 (20:32 +1000)
This patch updates the NX driver to perform several hyper calls when necessary
so that the length limits of scatter/gather lists are respected.
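
The limit in question is that a single coprocessor call only accepts a bounded scatter/gather list, so an update larger than that bound has to be split across several calls while the final block is still held back for finalization. The standalone sketch below illustrates that chunking pattern only; PAGE_BYTES, MAX_SG_LEN and BLOCK_SIZE are illustrative placeholders, not the driver's real per-call limits.

    #include <stdio.h>

    #define PAGE_BYTES  4096u  /* assumed bytes covered by one sg entry */
    #define MAX_SG_LEN    32u  /* assumed sg entries allowed per call   */
    #define BLOCK_SIZE    16u  /* AES block size                        */

    int main(void)
    {
    	unsigned int total = 200000;  /* bytes handed to this update */
    	unsigned int per_call = PAGE_BYTES * (MAX_SG_LEN - 1);
    	unsigned int call = 0;

    	while (total > BLOCK_SIZE) {
    		/* largest whole-block chunk one call may carry */
    		unsigned int to_process = total < per_call ? total : per_call;

    		to_process &= ~(BLOCK_SIZE - 1);

    		/* never consume the last block: it must be kept back so
    		 * the MAC can be finalized later */
    		if (to_process == total)
    			to_process -= BLOCK_SIZE;

    		printf("call %u: %u bytes, %u left\n",
    		       ++call, to_process, total - to_process);
    		total -= to_process;
    	}
    	printf("held back for finalization: %u bytes\n", total);
    	return 0;
    }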

Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Reviewed-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/nx/nx-aes-xcbc.c

index 658da0fd3e1f18f241143fe1e4d3c12a4f0b8af0..1a5d9e372b4e9ee8b13d35418398dc886173c042 100644
@@ -88,78 +88,97 @@ static int nx_xcbc_update(struct shash_desc *desc,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
-       u32 to_process, leftover;
+       u32 to_process, leftover, total;
+       u32 max_sg_len;
        unsigned long irq_flags;
        int rc = 0;
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-       if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-               /* we've hit the nx chip previously and we're updating again,
-                * so copy over the partial digest */
-               memcpy(csbcpb->cpb.aes_xcbc.cv,
-                      csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
-       }
+
+       total = sctx->count + len;
 
        /* 2 cases for total data len:
         *  1: <= AES_BLOCK_SIZE: copy into state, return 0
         *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if (len + sctx->count <= AES_BLOCK_SIZE) {
+       if (total <= AES_BLOCK_SIZE) {
                memcpy(sctx->buffer + sctx->count, data, len);
                sctx->count += len;
                goto out;
        }
 
-       /* to_process: the AES_BLOCK_SIZE data chunk to process in this
-        * update */
-       to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
-       leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
-
-       /* the hardware will not accept a 0 byte operation for this algorithm
-        * and the operation MUST be finalized to be correct. So if we happen
-        * to get an update that falls on a block sized boundary, we must
-        * save off the last block to finalize with later. */
-       if (!leftover) {
-               to_process -= AES_BLOCK_SIZE;
-               leftover = AES_BLOCK_SIZE;
-       }
-
-       if (sctx->count) {
-               in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
-                                        sctx->count, nx_ctx->ap->sglen);
-               in_sg = nx_build_sg_list(in_sg, (u8 *)data,
-                                        to_process - sctx->count,
-                                        nx_ctx->ap->sglen);
+       in_sg = nx_ctx->in_sg;
+       max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                               nx_ctx->ap->sglen);
+
+       do {
+
+               /* to_process: the AES_BLOCK_SIZE data chunk to process in this
+                * update */
+               to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+               to_process = min_t(u64, to_process,
+                                       NX_PAGE_SIZE * (max_sg_len - 1));
+               to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+               leftover = total - to_process;
+
+               /* the hardware will not accept a 0 byte operation for this
+                * algorithm and the operation MUST be finalized to be correct.
+                * So if we happen to get an update that falls on a block sized
+                * boundary, we must save off the last block to finalize with
+                * later. */
+               if (!leftover) {
+                       to_process -= AES_BLOCK_SIZE;
+                       leftover = AES_BLOCK_SIZE;
+               }
+
+               if (sctx->count) {
+                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                                               (u8 *) sctx->buffer,
+                                               sctx->count,
+                                               max_sg_len);
+               }
+               in_sg = nx_build_sg_list(in_sg,
+                                       (u8 *) data,
+                                       to_process - sctx->count,
+                                       max_sg_len);
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);
-       } else {
-               in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
-                                        nx_ctx->ap->sglen);
-               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-                                       sizeof(struct nx_sg);
-       }
 
-       NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+               /* we've hit the nx chip previously and we're updating again,
+                * so copy over the partial digest */
+               if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+                       memcpy(csbcpb->cpb.aes_xcbc.cv,
+                               csbcpb->cpb.aes_xcbc.out_cv_mac,
+                               AES_BLOCK_SIZE);
+               }
+
+               NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+               if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+                       rc = -EINVAL;
+                       goto out;
+               }
+
+               rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                          desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+               if (rc)
+                       goto out;
 
-       if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
-               rc = -EINVAL;
-               goto out;
-       }
+               atomic_inc(&(nx_ctx->stats->aes_ops));
 
-       rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-                          desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-       if (rc)
-               goto out;
+               /* everything after the first update is continuation */
+               NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       atomic_inc(&(nx_ctx->stats->aes_ops));
+               total -= to_process;
+               data += to_process - sctx->count;
+               sctx->count = 0;
+               in_sg = nx_ctx->in_sg;
+       } while (leftover > AES_BLOCK_SIZE);
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buffer, data + len - leftover, leftover);
+       memcpy(sctx->buffer, data, leftover);
        sctx->count = leftover;
 
-       /* everything after the first update is continuation */
-       NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
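
One detail worth calling out in the hunk above: the copy of out_cv_mac into cv now happens inside the loop, because once the work is split across several hyper calls every call after the first is a continuation and must be seeded with the partial digest from the previous one. A standalone sketch of that chaining pattern follows; fake_state and fake_hcall are made-up stand-ins, not the real CSB/CPB interface.

    #include <stdio.h>
    #include <string.h>

    #define AES_BLOCK_SIZE 16

    struct fake_state {
    	unsigned char cv[AES_BLOCK_SIZE];         /* chaining value fed in   */
    	unsigned char out_cv_mac[AES_BLOCK_SIZE]; /* partial MAC handed back */
    	int continuation;
    };

    /* stand-in for one hardware call: folds a chunk into the running MAC */
    static void fake_hcall(struct fake_state *st, const unsigned char *chunk,
    		       size_t len)
    {
    	size_t i;

    	for (i = 0; i < len; i++)
    		st->out_cv_mac[i % AES_BLOCK_SIZE] ^=
    			chunk[i] ^ st->cv[i % AES_BLOCK_SIZE];
    }

    int main(void)
    {
    	struct fake_state st = { { 0 }, { 0 }, 0 };
    	unsigned char data[4 * AES_BLOCK_SIZE] = { 1, 2, 3 };
    	int chunk;

    	for (chunk = 0; chunk < 4; chunk++) {
    		/* every call after the first is a continuation, so the
    		 * partial digest is copied back in as the new cv */
    		if (st.continuation)
    			memcpy(st.cv, st.out_cv_mac, AES_BLOCK_SIZE);

    		fake_hcall(&st, data + chunk * AES_BLOCK_SIZE, AES_BLOCK_SIZE);
    		st.continuation = 1;
    	}

    	printf("first byte of running MAC: 0x%02x\n", st.out_cv_mac[0]);
    	return 0;
    }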