--- zzzz-none-000/linux-5.15.111/drivers/mmc/core/block.c	2023-05-11 14:00:40.000000000 +0000
+++ puma7-arm-6670-761/linux-5.15.111/drivers/mmc/core/block.c	2024-02-07 09:27:48.000000000 +0000
@@ -18,6 +18,11 @@
  * Author:  Andrew Christian
  *          28 May 2002
  */
+/*
+ * Includes Intel Corporation's changes/modifications dated: 2011, 2017, 2018.
+ * Changed/modified portions - Copyright (c) 2011-2018, Intel Corporation.
+ */
+
 #include
 #include
 #include
@@ -47,6 +52,16 @@
 #include
 #include
 
+#ifdef CONFIG_ARCH_GEN3
+#include
+#include "../core/mmc_ops.h"
+#include "../core/core.h"
+#endif
+
+#ifdef CONFIG_ARCH_GEN3
+#include
+#endif
+
 #include
 
 #include "queue.h"
@@ -394,6 +409,810 @@
 	return 0;
 }
 
+#ifdef CONFIG_ARCH_GEN3
+static int mmc_blk_bp_getinfo(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	struct mmc_bp_info bp;
+	int boot;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	memset(&bp, 0, sizeof(struct mmc_bp_info));
+	bp.sectors = card->ext_csd.boot_size_mult * (MMC_BP_UNIT_SIZE/MMC_SECTOR_SIZE);
+	boot = (card->ext_csd.boot_config >> 3) & 0x07;
+	switch (boot) {
+	case 0:
+		bp.booten = MMC_BOOT_EN_NONE;
+		break;
+	case 1:
+		bp.booten = MMC_BOOT_EN_BP0;
+		break;
+	case 2:
+		bp.booten = MMC_BOOT_EN_BP1;
+		break;
+	case 7:
+		bp.booten = MMC_BOOT_EN_USER;
+		break;
+	default:
+		bp.booten = MMC_BOOT_EN_RESV;
+	}
+	if(copy_to_user((void __user *) arg, &bp, sizeof(struct mmc_bp_info)))
+		return -EFAULT;
+	return 0;
+}
+static int mmc_blk_gp_getinfo(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	struct mmc_gp_info gp;
+	int i;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	memset(&gp, 0, sizeof(struct mmc_gp_info));
+	for (i=0; i < 4; i++)
+		gp.sectors[i] = card->ext_csd.gp_size[i];
+	if(copy_to_user((void __user *) arg, &gp, sizeof(struct mmc_gp_info)))
+		return -EFAULT;
+	return 0;
+}
+static int mmc_blk_card_getinfo(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	struct mmc_card_info info;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	memset(&info, 0, sizeof(struct mmc_card_info));
+	info.rca = card->rca;
+	info.ocr = card->ocr;
+	if(copy_to_user((void __user *) arg, &info, sizeof(struct mmc_card_info)))
+		return -EFAULT;
+	return 0;
+}
+static int mmc_blk_reset_host_controller(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	struct mmc_host *mmc_host;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+	{
+		return -ENXIO;
+	}
+	mmc_host = card->host;
+	/* power off */
+	/* copy from "static void mmc_power_off(struct mmc_host *host)"@core/core.c*/
+	{
+		mmc_host->ios.clock = 0;
+		mmc_host->ios.vdd = 0;
+		/*
+		 * Reset ocr mask to be the highest possible voltage supported for
+		 * this mmc host. This value will be used at next power up.
+		 */
+		card->ocr = 1 << (fls(mmc_host->ocr_avail) - 1);
+		if (!mmc_host_is_spi(mmc_host))
+		{
+			mmc_host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+			mmc_host->ios.chip_select = MMC_CS_DONTCARE;
+		}
+		mmc_host->ios.power_mode = MMC_POWER_OFF;
+		mmc_host->ios.bus_width = MMC_BUS_WIDTH_1;
+		mmc_host->ios.timing = MMC_TIMING_LEGACY;
+		mmc_host->ops->set_ios(mmc_host,&mmc_host->ios);
+	}
+	/* Power up */
+	/* copy form "static void mmc_power_up(struct mmc_host *host) @core/core.c */
+	{
+		int bit;
+		/* If ocr is set, we use it */
+		if (card->ocr)
+			bit = ffs(card->ocr) - 1; /* ffs - find first bit set */
+		else
+			bit = fls(mmc_host->ocr_avail) - 1;
+		mmc_host->ios.vdd = bit;
+		if (mmc_host_is_spi(mmc_host)) {
+			mmc_host->ios.chip_select = MMC_CS_HIGH;
+			mmc_host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+		} else {
+			mmc_host->ios.chip_select = MMC_CS_DONTCARE;
+			mmc_host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+		}
+		mmc_host->ios.power_mode = MMC_POWER_UP;
+		mmc_host->ios.bus_width = MMC_BUS_WIDTH_1;
+		mmc_host->ios.timing = MMC_TIMING_LEGACY;
+		mmc_host->ops->set_ios(mmc_host,&mmc_host->ios);
+		/*
+		 * This delay should be sufficient to allow the power supply
+		 * to reach the minimum voltage.
+		 */
+		mmc_delay(10);
+		mmc_host->ios.clock = mmc_host->f_init;
+		mmc_host->ios.power_mode = MMC_POWER_ON;
+		mmc_host->ops->set_ios(mmc_host,&mmc_host->ios);
+		/*
+		 * This delay must be at least 74 clock sizes, or 1 ms, or the
+		 * time required to reach a stable voltage.
+		 */
+		mmc_delay(10);
+	}
+	return 0;
+}
+static int mmc_blk_set_io_host_controller(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	struct mmc_host *mmc_host;
+	struct mmc_host_ctrl_ios host_ctrl_ios;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+	{
+		return -ENXIO;
+	}
+	if(copy_from_user(&host_ctrl_ios, (void __user *) arg, sizeof(struct mmc_host_ctrl_ios)))
+		return -EFAULT;
+	mmc_host = card->host;
+	switch (host_ctrl_ios.ios_op_code)
+	{
+	case MMC_HOST_CTRL_IOS_SET_HIGH_SPEED:
+		mmc_host->ios.timing = host_ctrl_ios.timing;
+		break;
+	case MMC_HOST_CTRL_IOS_SET_CLK:
+		if (host_ctrl_ios.clock == MMC_HOST_CTRL_MAX_CLK)
+		{
+			if (card->ext_csd.hs_max_dtr > mmc_host->f_max)
+				mmc_host->ios.clock = mmc_host->f_max;
+			else
+				mmc_host->ios.clock = card->ext_csd.hs_max_dtr;
+		}
+		else
+		{
+			mmc_host->ios.clock = host_ctrl_ios.clock;
+		}
+		break;
+	case MMC_HOST_CTRL_IOS_SET_BUS_WIDTH:
+		mmc_host->ios.bus_width = host_ctrl_ios.bus_width;
+		break;
+
+	}
+	mmc_host->ops->set_ios(mmc_host,&mmc_host->ios);
+	return 0;
+}
+static int mmc_blk_init_card(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	struct mmc_host *mmc_host;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	mmc_host = card->host;
+	if (mmc_host->bus_ops->resume)
+	{
+		if (mmc_host->bus_ops->resume(mmc_host) != 0 ) /* calling to "static int mmc_resume(struct mmc_host *host)" @ core/mmc.c */
+		{
+			return -EIO;
+		}
+	}
+	return 0;
+}
+static int mmc_blk_claim_host(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	mmc_claim_host(card->host);
+	return 0;
+}
+static int mmc_blk_release_host(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_card *card;
+	if(!md)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	mmc_release_host(card->host);
+	return 0;
+}
+static int mmc_blk_bp_set_acc(struct mmc_card *card, unsigned char mode)
+{
+	unsigned char val;
+	val = (card->ext_csd.boot_config & 0xF8) | mode;
+	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, val, 0);
+}
+static int mmc_blk_set_erase_grp_def(struct mmc_card *card, unsigned char mode)
+{
+	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_ERASE_GROUP_DEF, mode, 0);
+}
+static int mmc_blk_bp_rw(struct mmc_blk_data *md, unsigned long arg, int part_type)
+{
+	struct mmc_queue *mq;
+	struct mmc_card *card;
+	struct mmc_blk_request brq;
+	struct mmc_bp_rw bprw;
+	int retval = 0;
+	unsigned char dest_part = 0;
+	if(!md)
+		return -ENXIO;
+	mq = &md->queue;
+	if(!mq)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	if((part_type == 1) && (!(card->ext_csd.boot_size_mult))) {
+		printk("mmcblk: Error - No boot partition in the card\n");
+		return -ENODEV;
+	}
+	//if((!mq->bp_sg) || (!mq->bp_buf))
+	if(!mq->bp_sg)
+		return -ENOMEM;
+	memset(&bprw, 0, sizeof(struct mmc_bp_rw));
+	if(copy_from_user(&bprw, (void __user *) arg, sizeof(struct mmc_bp_rw)))
+		return -EFAULT;
+	if((part_type == 1) && (bprw.which > MAX_NUM_OF_BOOT_PARTITIONS)) {
+		printk("mmcblk: Error - Invalid boot partition number\n");
+		return -EINVAL;
+	}
+	if((part_type == 2) && (bprw.which > 3)) {
+		printk("mmcblk: Error - Invalid gp partition number\n");
+		return -EINVAL;
+	}
+	if((part_type == 2) && (!(card->ext_csd.gp_size[bprw.which]))) {
+		printk("mmcblk: Error - Gp %d doesn't exist\n", bprw.which);
+		return -ENODEV;
+	}
+	if((bprw.dir != BP_DIR_READ) && (bprw.dir != BP_DIR_WRITE)) {
+		printk("mmcblk: Error - Invalid access direction\n");
+		return -EINVAL;
+	}
+	if(bprw.nr_sectors > MAX_NUM_OF_SECTORS_ADMA2_PAGE) {
+		printk("mmcblk: Error - Too many sectors to be tranferred\n");
+		return -EINVAL;
+	}
+	if((part_type == 1) && ((bprw.st_sector + bprw.nr_sectors) > (card->ext_csd.boot_size_mult * (MMC_BP_UNIT_SIZE/MMC_SECTOR_SIZE)))) {
+		printk("mmcblk: Error - Access beyond bp size limit\n");
+		return -EINVAL;
+	}
+	if((part_type == 2) && ((bprw.st_sector + bprw.nr_sectors) > (card->ext_csd.gp_size[bprw.which]))) {
+		printk("mmcblk: Error - Access beyond gp size limit\n");
+		return -EINVAL;
+	}
+	mmc_claim_host(card->host);
+	mq->bp_buf = kmalloc(MAX_NUM_OF_SECTORS_ADMA2_PAGE * MMC_SECTOR_SIZE, GFP_KERNEL);
+	if (!mq->bp_buf) {
+		printk("mmcblk: Error - unable to alloc boot partition memory buffer\n");
+		return -ENOMEM;
+	}
+	if(bprw.dir == BP_DIR_WRITE) {
+		if(copy_from_user(mq->bp_buf, (void __user *) bprw.buf, bprw.nr_sectors * MMC_SECTOR_SIZE)) {
+			if (mq->bp_buf)
+			{
+				kfree(mq->bp_buf);
+				mq->bp_buf = NULL;
+			}
+			mmc_release_host(card->host);
+			return -EFAULT;
+		}
+	}
+	if(part_type == 2) {
+		if(mmc_blk_set_erase_grp_def(card, 1)) {
+			printk("mmcblk: Error - setting erase group def\n");
+			if (mq->bp_buf)
+			{
+				kfree(mq->bp_buf);
+				mq->bp_buf = NULL;
+			}
+			mmc_release_host(card->host);
+			return -EFAULT;
+		}
+	}
+	if(part_type == 1)
+		dest_part = bprw.which + 1;
+	else if(part_type == 2)
+		dest_part = bprw.which + 4;
+	if(mmc_blk_bp_set_acc(card, dest_part)) {
+		printk("mmcblk: Error - switching partition access\n");
+		if (mq->bp_buf)
+		{
+			kfree(mq->bp_buf);
+			mq->bp_buf = NULL;
+		}
+		mmc_release_host(card->host);
+		return -EFAULT;
+	}
+	do {
+		struct mmc_command cmd;
+		u32 readcmd, writecmd;
+		memset(&brq, 0, sizeof(struct mmc_blk_request));
+		brq.mrq.cmd = &brq.cmd;
+		brq.mrq.data = &brq.data;
+		brq.cmd.arg = bprw.st_sector;
+		if (!mmc_card_blockaddr(card))
+			brq.cmd.arg <<= 9;
+		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+		brq.data.blksz = MMC_SECTOR_SIZE;
+		brq.stop.opcode = MMC_STOP_TRANSMISSION;
+		brq.stop.arg = 0;
+		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		brq.data.blocks = bprw.nr_sectors;
+		if (brq.data.blocks > 1) {
+			/* SPI multiblock writes terminate using a special
+			 * token, not a STOP_TRANSMISSION request.
+			 */
+			if (!mmc_host_is_spi(card->host)
+				|| bprw.dir == BP_DIR_READ)
+				brq.mrq.stop = &brq.stop;
+			readcmd = MMC_READ_MULTIPLE_BLOCK;
+			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+		} else {
+			brq.mrq.stop = NULL;
+			readcmd = MMC_READ_SINGLE_BLOCK;
+			writecmd = MMC_WRITE_BLOCK;
+		}
+		if (bprw.dir == BP_DIR_READ) {
+			brq.cmd.opcode = readcmd;
+			brq.data.flags |= MMC_DATA_READ;
+		} else {
+			brq.cmd.opcode = writecmd;
+			brq.data.flags |= MMC_DATA_WRITE;
+		}
+		mmc_set_data_timeout(&brq.data, card);
+		brq.data.sg = mq->bp_sg;
+		sg_init_one(mq->bp_sg, mq->bp_buf, bprw.nr_sectors * MMC_SECTOR_SIZE);
+		brq.data.sg_len = 1;
+		mmc_wait_for_req(card->host, &brq.mrq);
+		/*
+		 * Check for errors here, but don't jump to cmd_err
+		 * until later as we need to wait for the card to leave
+		 * programming mode even when things go wrong.
+		 */
+		if (brq.cmd.error)
+			printk("mmcblk: Error sending read/write command\n");
+		if (brq.data.error)
+			printk("mmcblk: Error transferring data\n");
+		if (brq.stop.error)
+			printk("mmcblk: Error sending stop command\n");
+		if (!mmc_host_is_spi(card->host) && bprw.dir != BP_DIR_READ) {
+			do {
+				int err;
+				cmd.opcode = MMC_SEND_STATUS;
+				cmd.arg = card->rca << 16;
+				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+				err = mmc_wait_for_cmd(card->host, &cmd, 5);
+				if (err) {
+					printk("mmcblk: Error requesting status\n");
+					goto cmd_err;
+				}
+				/*
+				 * Some cards mishandle the status bits,
+				 * so make sure to check both the busy
+				 * indication and the card state.
+				 */
+			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+				(R1_CURRENT_STATE(cmd.resp[0]) == 7));
+		}
+		if (brq.cmd.error || brq.data.error || brq.stop.error)
+			goto cmd_err;
+	} while (0);
+	if(bprw.dir == BP_DIR_READ) {
+		if(copy_to_user((void __user *) bprw.buf, mq->bp_buf, bprw.nr_sectors * MMC_SECTOR_SIZE))
+			retval = -EFAULT;
+	}
+	if(mmc_blk_bp_set_acc(card, 0)) {
+		printk("mmcblk: Error setting back to user partition access\n");
+		retval = -EFAULT;
+	}
+	if(part_type == 2) {
+		if(mmc_blk_set_erase_grp_def(card, card->ext_csd.erase_group_def & 0x1)) {
+			printk("mmcblk: Error setting erase group def\n");
+			retval = -EFAULT;
+		}
+	}
+	if (mq->bp_buf)
+	{
+		kfree(mq->bp_buf);
+		mq->bp_buf = NULL;
+	}
+	mmc_release_host(card->host);
+
+	return retval;
+ cmd_err:
+	if(mmc_blk_bp_set_acc(card, 0))
+		printk("mmcblk: Error setting back to user partition access\n");
+	if(part_type == 2) {
+		if(mmc_blk_set_erase_grp_def(card, card->ext_csd.erase_group_def & 0x1))
+			printk("mmcblk: Error setting erase group def\n");
+	}
+	if (mq->bp_buf)
+	{
+		kfree(mq->bp_buf);
+		mq->bp_buf = NULL;
+	}
+	mmc_release_host(card->host);
+
+	return -ENXIO;
+}
+static int mmc_blk_cmd_nodata(struct mmc_card *card, struct mmc_arb_cmd *parbcmd)
+{
+	struct mmc_command cmd;
+	unsigned int flags = 0;
+	int err;
+
+	if(!parbcmd->resp) {
+		printk("mmcblk: Response buffer is NULL\n");
+		return -EINVAL;
+	}
+
+	/* convert 'rsp' to cmd.flags */
+	switch (parbcmd->rsp)
+	{
+	case MMC_CMD_RSP_NONE:
+		flags |= MMC_RSP_NONE;
+		break;
+	case MMC_CMD_RSP_R1:
+		flags |= MMC_RSP_R1;
+		break;
+	case MMC_CMD_RSP_R1B:
+		flags |= MMC_RSP_R1B;
+		break;
+	case MMC_CMD_RSP_R2:
+		flags |= MMC_RSP_R2;
+		break;
+	case MMC_CMD_RSP_R3:
+		flags |= MMC_RSP_R3;
+		break;
+	case MMC_CMD_RSP_R4:
+		flags |= MMC_RSP_R4;
+		break;
+	case MMC_CMD_RSP_R5:
+		flags |= MMC_RSP_R5;
+		break;
+	}
+
+	/* convert 'type' to cmd.flags */
+	switch (parbcmd->type)
+	{
+	case MMC_CMD_TYPE_AC:
+		flags |= MMC_CMD_AC;
+		break;
+	case MMC_CMD_TYPE_ADTC:
+		flags |= MMC_CMD_ADTC;
+		break;
+	case MMC_CMD_TYPE_BC:
+		flags |= MMC_CMD_BC;
+		break;
+	case MMC_CMD_TYPE_BCR:
+		flags |= MMC_CMD_BCR;
+		break;
+	}
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = parbcmd->opcode;
+	cmd.arg = parbcmd->arg;
+	cmd.flags = flags;
+
+	mmc_claim_host(card->host);
+
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err) {
+		printk("mmcblk: Command fail, opcode is %d, error is %d\n", cmd.opcode, err);
+		mmc_release_host(card->host);
+		return err;
+	}
+
+	if(copy_to_user((void __user *) parbcmd->resp, cmd.resp, sizeof(u32) * 4)) {
+		mmc_release_host(card->host);
+		return -EFAULT;
+	}
+
+	mmc_release_host(card->host);
+
+	return 0;
+}
+static int mmc_blk_cmd_data(struct mmc_blk_data *md, struct mmc_arb_cmd *parbcmd)
+{
+	struct mmc_queue *mq;
+	struct mmc_card *card;
+	struct mmc_blk_request brq;
+	unsigned int flags = 0;
+
+	int retval = 0;
+
+	mq = &md->queue;
+
+	card = md->queue.card;
+
+
+	//if((!mq->bp_sg) || (!mq->bp_buf))
+	if(!mq->bp_sg)
+		return -ENOMEM;
+
+	if(!parbcmd->resp) {
+		printk("mmcblk: Error - Response buffer is NULL\n");
+		return -EINVAL;
+	}
+
+	if(parbcmd->datalen % 512) {
+		printk("mmcblk: Error - Data length is not multiples of 512\n");
+		return -EINVAL;
+	}
+
+	if(parbcmd->datalen > card->host->max_req_size) {
+		printk("mmcblk: Error - Data length is too long (%d). max size is %d\n", parbcmd->datalen, card->host->max_req_size);
+		return -EINVAL;
+	}
+
+	/* convert 'rsp' to cmd.flags */
+	switch (parbcmd->rsp)
+	{
+	case MMC_CMD_RSP_NONE:
+		flags |= MMC_RSP_NONE;
+		break;
+	case MMC_CMD_RSP_R1:
+		flags |= MMC_RSP_R1;
+		break;
+	case MMC_CMD_RSP_R1B:
+		flags |= MMC_RSP_R1B;
+		break;
+	case MMC_CMD_RSP_R2:
+		flags |= MMC_RSP_R2;
+		break;
+	case MMC_CMD_RSP_R3:
+		flags |= MMC_RSP_R3;
+		break;
+	case MMC_CMD_RSP_R4:
+		flags |= MMC_RSP_R4;
+		break;
+	case MMC_CMD_RSP_R5:
+		flags |= MMC_RSP_R5;
+		break;
+	}
+
+	/* convert 'type' to cmd.flags */
+	switch (parbcmd->type)
+	{
+	case MMC_CMD_TYPE_AC:
+		flags |= MMC_CMD_AC;
+		break;
+	case MMC_CMD_TYPE_ADTC:
+		flags |= MMC_CMD_ADTC;
+		break;
+	case MMC_CMD_TYPE_BC:
+		flags |= MMC_CMD_BC;
+		break;
+	case MMC_CMD_TYPE_BCR:
+		flags |= MMC_CMD_BCR;
+		break;
+	}
+
+	mmc_claim_host(card->host);
+
+	mq->bp_buf = kmalloc(parbcmd->datalen, GFP_KERNEL);
+	if (!mq->bp_buf) {
+		printk("mmcblk: Error - unable to alloc boot partition memory buffer\n");
+		mmc_release_host(card->host);
+		return -ENOMEM;
+	}
+
+	if(parbcmd->datadir == BP_DIR_WRITE)
+	{
+		if(copy_from_user(mq->bp_buf, (void __user *) parbcmd->databuf, parbcmd->datalen))
+		{
+			printk("mmcblk: Error - Copy from user failed\n");
+			retval = -EFAULT;
+			goto cmd_err;
+		}
+	}
+
+	do {
+		struct mmc_command cmd;
+
+		memset(&brq, 0, sizeof(struct mmc_blk_request));
+		brq.mrq.cmd = &brq.cmd;
+		brq.mrq.data = &brq.data;
+
+		brq.cmd.arg = parbcmd->arg;
+		brq.cmd.flags = flags;
+		brq.data.blksz = MMC_SECTOR_SIZE;
+		brq.stop.opcode = MMC_STOP_TRANSMISSION;
+		brq.stop.arg = 0;
+		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		brq.data.blocks = parbcmd->datalen / 512;
+
+		if(parbcmd->stop)
+			brq.mrq.stop = &brq.stop;
+		else
+			brq.mrq.stop = NULL;
+
+		brq.cmd.opcode = parbcmd->opcode;
+
+		if (parbcmd->datadir == BP_DIR_READ)
+		{
+			brq.data.flags = MMC_DATA_READ;
+		}
+		else
+		{
+			brq.data.flags = MMC_DATA_WRITE;
+		}
+
+		mmc_set_data_timeout(&brq.data, card);
+
+		brq.data.sg = mq->bp_sg;
+		{
+			int i = 0;
+			int pages;
+			int datalen;
+
+			datalen = parbcmd->datalen;
+			pages = datalen / (ADMA2_PAGE_SIZE);
+			if ((pages * (ADMA2_PAGE_SIZE)) < datalen)
+				pages++;
+			sg_init_table(mq->bp_sg, pages);
+			for (i=0;i<pages;i++) {
+				sg_set_buf(&((mq->bp_sg)[i]), mq->bp_buf + (i* ADMA2_PAGE_SIZE), (datalen>ADMA2_PAGE_SIZE)?ADMA2_PAGE_SIZE:datalen);
+				datalen -= MAX_NUM_OF_SECTORS_ADMA2_PAGE * MMC_SECTOR_SIZE;
+			}
+			brq.data.sg_len = pages;
+		}
+
+		mmc_wait_for_req(card->host, &brq.mrq);
+
+		/*
+		 * Check for errors here, but don't jump to cmd_err
+		 * until later as we need to wait for the card to leave
+		 * programming mode even when things go wrong.
+		 */
+		if (brq.cmd.error)
+			printk("mmcblk: Error sending read/write command\n");
+
+		if (brq.data.error)
+			printk("mmcblk: Error transferring data\n");
+
+		if (brq.stop.error)
+			printk("mmcblk: Error sending stop command\n");
+
+		if (parbcmd->dataready) {
+			do {
+				int err;
+
+				cmd.opcode = MMC_SEND_STATUS;
+				cmd.arg = card->rca << 16;
+				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+				err = mmc_wait_for_cmd(card->host, &cmd, 5);
+				if (err) {
+					printk("mmcblk: Error requesting status\n");
+					retval = -ENXIO;
+					goto cmd_err;
+				}
+				/*
+				 * Some cards mishandle the status bits,
+				 * so make sure to check both the busy
+				 * indication and the card state.
+				 */
+			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+				(R1_CURRENT_STATE(cmd.resp[0]) == 7));
+		}
+
+		if (brq.cmd.error || brq.data.error || brq.stop.error)
+		{
+			retval = -ENXIO;
+			goto cmd_err;
+		}
+
+	} while (0);
+
+	if(parbcmd->datadir == BP_DIR_READ)
+	{
+		if(copy_to_user((void __user *) parbcmd->databuf, mq->bp_buf, parbcmd->datalen))
+			retval = -EFAULT;
+	}
+
+	if(copy_to_user((void __user *) parbcmd->resp, brq.cmd.resp, sizeof(u32) * 4))
+		retval = -EFAULT;
+
+ cmd_err:
+	if (mq->bp_buf)
+	{
+		kfree(mq->bp_buf);
+		mq->bp_buf = NULL;
+	}
+	mmc_release_host(card->host);
+
+	return retval;
+}
+
+static int mmc_blk_arb_cmd(struct mmc_blk_data *md, unsigned long arg)
+{
+	struct mmc_queue *mq;
+	struct mmc_card *card;
+	struct mmc_arb_cmd arbcmd;
+	if(!md)
+		return -ENXIO;
+	mq = &md->queue;
+	if(!mq)
+		return -ENXIO;
+	card = md->queue.card;
+	if(!card)
+		return -ENXIO;
+	memset(&arbcmd, 0, sizeof(struct mmc_arb_cmd));
+	if(copy_from_user(&arbcmd, (void __user *) arg, sizeof(struct mmc_arb_cmd)))
+		return -EFAULT;
+	if(!arbcmd.databuf || !arbcmd.datalen)
+		return mmc_blk_cmd_nodata(card, &arbcmd);
+	else
+		return mmc_blk_cmd_data(md, &arbcmd);
+}
+
+DEFINE_MUTEX(mmc_ioctl_mutex);
+int mmc_in_suspend = 0;
+
+static int mmc_blk_ioctl_puma(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+	int retval;
+	mutex_lock(&mmc_ioctl_mutex);
+	if (mmc_in_suspend) {
+		mutex_unlock(&mmc_ioctl_mutex);
+		return -ENODEV;
+	}
+	switch (cmd) {
+	case MMC_BLK_IOCTL_BP_GETINFO:
+		retval = mmc_blk_bp_getinfo(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_GP_GETINFO:
+		retval = mmc_blk_gp_getinfo(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_BP_RDWR:
+		retval = mmc_blk_bp_rw(bdev->bd_disk->private_data, arg, 1);
+		break;
+	case MMC_BLK_IOCTL_GP_RDWR:
+		retval = mmc_blk_bp_rw(bdev->bd_disk->private_data, arg, 2);
+		break;
+	case MMC_BLK_IOCTL_ARB_CMD:
+		retval = mmc_blk_arb_cmd(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_CARD_INFO:
+		retval = mmc_blk_card_getinfo(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_CLAIM_HOST:
+		retval = mmc_blk_claim_host(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_RELEASE_HOST:
+		retval = mmc_blk_release_host(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_RESET_HOST_CTRL:
+		retval = mmc_blk_reset_host_controller(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_SET_IOS_HOST_CTRL:
+		retval = mmc_blk_set_io_host_controller(bdev->bd_disk->private_data, arg);
+		break;
+	case MMC_BLK_IOCTL_INIT_CARD:
+		retval = mmc_blk_init_card(bdev->bd_disk->private_data, arg);
+		break;
+	default:
+		retval = -ENOTTY;
+	}
+	mutex_unlock(&mmc_ioctl_mutex);
+	return retval;
+}
+#endif
+
 struct mmc_blk_ioc_data {
 	struct mmc_ioc_cmd ic;
 	unsigned char *buf;
@@ -669,6 +1488,7 @@
 	return ioc_err ? ioc_err : err;
 }
 
+#ifndef CONFIG_ARCH_GEN3
 static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
 				   struct mmc_ioc_multi_cmd __user *user,
 				   struct mmc_rpmb_data *rpmb)
@@ -744,6 +1564,7 @@
 	kfree(idata);
 	return ioc_err ? ioc_err : err;
 }
+#endif
 
 static int mmc_blk_check_blkdev(struct block_device *bdev)
 {
@@ -776,6 +1597,10 @@
 					NULL);
 		mmc_blk_put(md);
 		return ret;
+#ifdef CONFIG_ARCH_GEN3
+	default:
+		return mmc_blk_ioctl_puma(bdev, mode, cmd, arg);
+#else
 	case MMC_IOC_MULTI_CMD:
 		ret = mmc_blk_check_blkdev(bdev);
 		if (ret)
@@ -790,6 +1615,7 @@
 		return ret;
 	default:
 		return -EINVAL;
+#endif
 	}
 }
 
@@ -833,6 +1659,7 @@
 	.alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
 };
 
+
 static int mmc_blk_part_switch_pre(struct mmc_card *card,
 				   unsigned int part_type)
 {
@@ -870,9 +1697,11 @@
 	int ret = 0;
 	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 
-	if (main_md->part_curr == part_type)
+	if ((main_md->part_curr == part_type)&&(!card->host->mmc_force_part_switch))
 		return 0;
 
+	card->host->mmc_force_part_switch = false;
+
 	if (mmc_card_mmc(card)) {
 		u8 part_config = card->ext_csd.part_config;
 
@@ -2565,11 +3394,13 @@
 					(struct mmc_ioc_cmd __user *)arg,
 					rpmb);
 		break;
+#ifndef CONFIG_ARCH_GEN3
 	case MMC_IOC_MULTI_CMD:
 		ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
 					(struct mmc_ioc_multi_cmd __user *)arg,
 					rpmb);
 		break;
+#endif
 	default:
 		ret = -EINVAL;
 		break;
@@ -2996,7 +3827,6 @@
 	pm_runtime_get_sync(&card->dev);
 	if (md->part_curr != md->part_type) {
 		mmc_claim_host(card->host);
-		mmc_blk_part_switch(card, md->part_type);
 		mmc_release_host(card->host);
 	}
 	if (card->type != MMC_TYPE_SD_COMBO)