--- zzzz-none-000/linux-3.10.107/drivers/scsi/hpsa.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/scsi/hpsa.c 2021-02-04 17:41:59.000000000 +0000 @@ -1,6 +1,7 @@ /* * Disk Array driver for HP Smart Array SAS controllers - * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + * Copyright 2014-2015 PMC-Sierra, Inc. + * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,11 +12,7 @@ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * Questions/Comments/Bugfixes to storagedev@pmcs.com * */ @@ -29,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -44,22 +40,34 @@ #include #include #include +#include +#include +#include #include #include #include #include -#include #include +#include +#include +#include +#include #include "hpsa_cmd.h" #include "hpsa.h" -/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ -#define HPSA_DRIVER_VERSION "2.0.2-1" +/* + * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' + * with an optional trailing '-' followed by a byte value (0-255). + */ +#define HPSA_DRIVER_VERSION "3.4.14-0" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" -/* How long to wait (in milliseconds) for board to go into simple mode */ -#define MAX_CONFIG_WAIT 30000 +/* How long to wait for CISS doorbell communication */ +#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ +#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ +#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ +#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ #define MAX_IOCTL_CONFIG_WAIT 1000 /*define how many times we will try a command because of bus resets */ @@ -89,8 +97,8 @@ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, @@ -99,15 +107,42 @@ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, 
PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, + {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -125,8 +160,9 @@ {0x3245103C, "Smart Array P410i", &SA5_access}, {0x3247103C, "Smart Array P411", &SA5_access}, {0x3249103C, "Smart Array P812", &SA5_access}, - {0x324a103C, "Smart Array P712m", &SA5_access}, - {0x324b103C, "Smart Array P711m", &SA5_access}, + {0x324A103C, "Smart Array P712m", &SA5_access}, + {0x324B103C, "Smart Array P711m", &SA5_access}, + {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */ {0x3350103C, "Smart Array P222", &SA5_access}, {0x3351103C, "Smart Array P420", &SA5_access}, {0x3352103C, "Smart Array P421", &SA5_access}, @@ -134,62 +170,104 @@ {0x3354103C, "Smart Array P420i", &SA5_access}, {0x3355103C, "Smart Array P220i", &SA5_access}, {0x3356103C, "Smart Array P721m", &SA5_access}, - {0x1920103C, "Smart Array", &SA5_access}, - {0x1921103C, "Smart Array", &SA5_access}, - {0x1922103C, "Smart Array", &SA5_access}, - {0x1923103C, "Smart Array", &SA5_access}, - {0x1924103C, "Smart Array", &SA5_access}, - {0x1925103C, "Smart Array", &SA5_access}, - {0x1926103C, "Smart Array", &SA5_access}, - {0x1928103C, "Smart Array", &SA5_access}, - {0x334d103C, "Smart Array P822se", &SA5_access}, + {0x1921103C, "Smart Array P830i", &SA5_access}, + {0x1922103C, "Smart Array P430", &SA5_access}, + {0x1923103C, "Smart Array P431", &SA5_access}, + {0x1924103C, "Smart Array P830", &SA5_access}, + {0x1926103C, "Smart Array P731m", &SA5_access}, + {0x1928103C, "Smart Array P230i", &SA5_access}, + {0x1929103C, "Smart Array P530", &SA5_access}, + {0x21BD103C, "Smart Array P244br", &SA5_access}, + {0x21BE103C, "Smart Array P741m", &SA5_access}, + 
{0x21BF103C, "Smart HBA H240ar", &SA5_access}, + {0x21C0103C, "Smart Array P440ar", &SA5_access}, + {0x21C1103C, "Smart Array P840ar", &SA5_access}, + {0x21C2103C, "Smart Array P440", &SA5_access}, + {0x21C3103C, "Smart Array P441", &SA5_access}, + {0x21C4103C, "Smart Array", &SA5_access}, + {0x21C5103C, "Smart Array P841", &SA5_access}, + {0x21C6103C, "Smart HBA H244br", &SA5_access}, + {0x21C7103C, "Smart HBA H240", &SA5_access}, + {0x21C8103C, "Smart HBA H241", &SA5_access}, + {0x21C9103C, "Smart Array", &SA5_access}, + {0x21CA103C, "Smart Array P246br", &SA5_access}, + {0x21CB103C, "Smart Array P840", &SA5_access}, + {0x21CC103C, "Smart Array", &SA5_access}, + {0x21CD103C, "Smart Array", &SA5_access}, + {0x21CE103C, "Smart HBA", &SA5_access}, + {0x05809005, "SmartHBA-SA", &SA5_access}, + {0x05819005, "SmartHBA-SA 8i", &SA5_access}, + {0x05829005, "SmartHBA-SA 8i8e", &SA5_access}, + {0x05839005, "SmartHBA-SA 8e", &SA5_access}, + {0x05849005, "SmartHBA-SA 16i", &SA5_access}, + {0x05859005, "SmartHBA-SA 4i4e", &SA5_access}, + {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, + {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, + {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, + {0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, + {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; +static struct scsi_transport_template *hpsa_sas_transport_template; +static int hpsa_add_sas_host(struct ctlr_info *h); +static void hpsa_delete_sas_host(struct ctlr_info *h); +static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, + struct hpsa_scsi_dev_t *device); +static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device); +static struct hpsa_scsi_dev_t + *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, + struct sas_rphy *rphy); + +#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) +static const struct scsi_cmnd hpsa_cmd_busy; +#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) +static const struct scsi_cmnd hpsa_cmd_idle; static int number_of_controllers; -static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list); -static spinlock_t lockup_detector_lock; -static struct task_struct *hpsa_lockup_detector; - static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); -static void start_io(struct ctlr_info *h); +static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); #ifdef CONFIG_COMPAT -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); +static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, + void __user *arg); #endif static void cmd_free(struct ctlr_info *h, struct CommandList *c); -static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_alloc(struct ctlr_info *h); -static struct CommandList *cmd_special_alloc(struct ctlr_info *h); +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd); static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, - void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, + void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type); +static void hpsa_free_cmd_pool(struct ctlr_info *h); +#define VPD_PAGE (1 << 8) +#define 
HPSA_SIMPLE_ERROR_BITS 0x03 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static void hpsa_scan_start(struct Scsi_Host *); static int hpsa_scan_finished(struct Scsi_Host *sh, unsigned long elapsed_time); -static int hpsa_change_queue_depth(struct scsi_device *sdev, - int qdepth, int reason); +static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); +static int hpsa_slave_configure(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); -static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); +static void hpsa_update_scsi_devices(struct ctlr_info *h); static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c); static void check_ioctl_unit_attention(struct ctlr_info *h, struct CommandList *c); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, - int nsgs, int *bucket_map); -static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); + int nsgs, int min_blocks, u32 *bucket_map); +static void hpsa_free_performant_mode(struct ctlr_info *h); +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); static inline u32 next_command(struct ctlr_info *h, u8 q); static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, @@ -200,8 +278,21 @@ static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready); static inline void finish_cmd(struct CommandList *c); +static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); #define BOARD_NOT_READY 0 #define BOARD_READY 1 +static void hpsa_drain_accel_commands(struct ctlr_info *h); +static void hpsa_flush_cache(struct ctlr_info *h); +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); +static void hpsa_command_resubmit_worker(struct work_struct *work); +static u32 lockup_detected(struct ctlr_info *h); +static int detect_controller_lockup(struct ctlr_info *h); +static void hpsa_disable_rld_caching(struct ctlr_info *h); +static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, + struct ReportExtendedLUNdata *buf, int bufsize); +static int hpsa_luns_changed(struct ctlr_info *h); static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { @@ -215,40 +306,86 @@ return (struct ctlr_info *) *priv; } +static inline bool hpsa_is_cmd_idle(struct CommandList *c) +{ + return c->scsi_cmd == SCSI_CMD_IDLE; +} + +static inline bool hpsa_is_pending_event(struct CommandList *c) +{ + return c->abort_pending || c->reset_pending; +} + +/* extract sense key, asc, and ascq from sense data. -1 means invalid. 
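+ * (For example, fixed-format sense data beginning 70 00 06, with bytes 12 and 13 equal to 29 00, decodes to sense_key 0x06 (UNIT ATTENTION), asc 0x29, ascq 0x00.)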
*/ +static void decode_sense_data(const u8 *sense_data, int sense_data_len, + u8 *sense_key, u8 *asc, u8 *ascq) +{ + struct scsi_sense_hdr sshdr; + bool rc; + + *sense_key = -1; + *asc = -1; + *ascq = -1; + + if (sense_data_len < 1) + return; + + rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr); + if (rc) { + *sense_key = sshdr.sense_key; + *asc = sshdr.asc; + *ascq = sshdr.ascq; + } +} + static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c) { - if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) + u8 sense_key, asc, ascq; + int sense_len; + + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + + decode_sense_data(c->err_info->SenseInfo, sense_len, + &sense_key, &asc, &ascq); + if (sense_key != UNIT_ATTENTION || asc == 0xff) return 0; - switch (c->err_info->SenseInfo[12]) { + switch (asc) { case STATE_CHANGED: - dev_warn(&h->pdev->dev, HPSA "%d: a state change " - "detected, command retried\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: a state change detected, command retried\n", + h->devname); break; case LUN_FAILED: - dev_warn(&h->pdev->dev, HPSA "%d: LUN failure " - "detected, action required\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: LUN failure detected\n", h->devname); break; case REPORT_LUNS_CHANGED: - dev_warn(&h->pdev->dev, HPSA "%d: report LUN data " - "changed, action required\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: report LUN data changed\n", h->devname); /* * Note: this REPORT_LUNS_CHANGED condition only occurs on the external * target (array) devices. */ break; case POWER_OR_RESET: - dev_warn(&h->pdev->dev, HPSA "%d: a power on " - "or device reset detected\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: a power on or device reset detected\n", + h->devname); break; case UNIT_ATTENTION_CLEARED: - dev_warn(&h->pdev->dev, HPSA "%d: unit attention " - "cleared by another initiator\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: unit attention cleared by another initiator\n", + h->devname); break; default: - dev_warn(&h->pdev->dev, HPSA "%d: unknown " - "unit attention detected\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: unknown unit attention detected\n", + h->devname); break; } return 1; @@ -264,6 +401,69 @@ return 1; } +static u32 lockup_detected(struct ctlr_info *h); +static ssize_t host_show_lockup_detected(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ld; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + ld = lockup_detected(h); + + return sprintf(buf, "ld=%d\n", ld); +} + +static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int status, len; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + char tmpbuf[10]; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; + strncpy(tmpbuf, buf, len); + tmpbuf[len] = '\0'; + if (sscanf(tmpbuf, "%d", &status) != 1) + return -EINVAL; + h = shost_to_hba(shost); + h->acciopath_status = !!status; + dev_warn(&h->pdev->dev, + "hpsa: HP SSD Smart Path %s via sysfs update.\n", + h->acciopath_status ? 
"enabled" : "disabled"); + return count; +} + +static ssize_t host_store_raid_offload_debug(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int debug_level, len; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + char tmpbuf[10]; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; + strncpy(tmpbuf, buf, len); + tmpbuf[len] = '\0'; + if (sscanf(tmpbuf, "%d", &debug_level) != 1) + return -EINVAL; + if (debug_level < 0) + debug_level = 0; + h = shost_to_hba(shost); + h->raid_offload_debug = debug_level; + dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", + h->raid_offload_debug); + return count; +} + static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -296,7 +496,8 @@ struct Scsi_Host *shost = class_to_shost(dev); struct ctlr_info *h = shost_to_hba(shost); - return snprintf(buf, 20, "%d\n", h->commands_outstanding); + return snprintf(buf, 20, "%d\n", + atomic_read(&h->commands_outstanding)); } static ssize_t host_show_transport_mode(struct device *dev, @@ -311,10 +512,21 @@ "performant" : "simple"); } +static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 30, "HP SSD Smart Path %s\n", + (h->acciopath_status == 1) ? "enabled" : "disabled"); +} + /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ - 0x324b103C, /* SmartArray P711m */ + 0x324b103C, /* Smart Array P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ @@ -356,24 +568,32 @@ 0x409D0E11, /* Smart Array 6400 EM */ }; -static int ctlr_is_hard_resettable(u32 board_id) +static u32 needs_abort_tags_swizzled[] = { + 0x323D103C, /* Smart Array P700m */ + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* SmartArray P711m */ +}; + +static int board_id_in_array(u32 a[], int nelems, u32 board_id) { int i; - for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) - if (unresettable_controller[i] == board_id) - return 0; - return 1; + for (i = 0; i < nelems; i++) + if (a[i] == board_id) + return 1; + return 0; } -static int ctlr_is_soft_resettable(u32 board_id) +static int ctlr_is_hard_resettable(u32 board_id) { - int i; + return !board_id_in_array(unresettable_controller, + ARRAY_SIZE(unresettable_controller), board_id); +} - for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) - if (soft_unresettable_controller[i] == board_id) - return 0; - return 1; +static int ctlr_is_soft_resettable(u32 board_id) +{ + return !board_id_in_array(soft_unresettable_controller, + ARRAY_SIZE(soft_unresettable_controller), board_id); } static int ctlr_is_resettable(u32 board_id) @@ -382,6 +602,12 @@ ctlr_is_soft_resettable(board_id); } +static int ctlr_needs_abort_tags_swizzled(u32 board_id) +{ + return board_id_in_array(needs_abort_tags_swizzled, + ARRAY_SIZE(needs_abort_tags_swizzled), board_id); +} + static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { @@ -397,10 +623,23 @@ return (scsi3addr[3] & 0xC0) == 0x40; } -static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", - "1(ADM)", "UNKNOWN" +static 
const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6", + "1(+0)ADM", "UNKNOWN", "PHYS DRV" }; -#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) +#define HPSA_RAID_0 0 +#define HPSA_RAID_4 1 +#define HPSA_RAID_1 2 /* also used for RAID 10 */ +#define HPSA_RAID_5 3 /* also used for RAID 50 */ +#define HPSA_RAID_51 4 +#define HPSA_RAID_6 5 /* also used for RAID 60 */ +#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ +#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2) +#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1) + +static inline bool is_logical_device(struct hpsa_scsi_dev_t *device) +{ + return !device->physical_device; +} static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -422,7 +661,7 @@ } /* Is this even a logical drive? */ - if (!is_logical_dev_addr_mode(hdev->scsi3addr)) { + if (!is_logical_device(hdev)) { spin_unlock_irqrestore(&h->lock, flags); l = snprintf(buf, PAGE_SIZE, "N/A\n"); return l; @@ -488,10 +727,129 @@ sn[12], sn[13], sn[14], sn[15]); } +static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + int offload_enabled; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + offload_enabled = hdev->offload_enabled; + spin_unlock_irqrestore(&h->lock, flags); + return snprintf(buf, 20, "%d\n", offload_enabled); +} + +#define MAX_PATHS 8 + +static ssize_t path_info_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + int i; + int output_len = 0; + u8 box; + u8 bay; + u8 path_map_index = 0; + char *active; + unsigned char phys_connector[2]; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->devlock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->devlock, flags); + return -ENODEV; + } + + bay = hdev->bay; + for (i = 0; i < MAX_PATHS; i++) { + path_map_index = 1<<i; + if (i == hdev->active_path_index) + active = "Active"; + else if (hdev->path_map & path_map_index) + active = "Inactive"; + else + continue; + + output_len += scnprintf(buf + output_len, + PAGE_SIZE - output_len, + "[%d:%d:%d:%d] %20.20s ", + h->scsi_host->host_no, + hdev->bus, hdev->target, hdev->lun, + scsi_device_type(hdev->devtype)); + + if (hdev->external || + hdev->devtype == TYPE_RAID || + is_logical_device(hdev)) { + output_len += snprintf(buf + output_len, + PAGE_SIZE - output_len, + "%s\n", active); + continue; + } + + box = hdev->box[i]; + memcpy(&phys_connector, &hdev->phys_connector[i], + sizeof(phys_connector)); + if (phys_connector[0] < '0') + phys_connector[0] = '0'; + if (phys_connector[1] < '0') + phys_connector[1] = '0'; + if (hdev->phys_connector[i] > 0) + output_len += snprintf(buf + output_len, + PAGE_SIZE - output_len, + "PORT: %.2s ", + phys_connector); + if (hdev->devtype == TYPE_DISK && hdev->expose_device) { + if (box == 0 || box == 0xFF) { + output_len += snprintf(buf + output_len, + PAGE_SIZE - output_len, + "BAY: %hhu %s\n", + bay, active); + } else { + output_len += snprintf(buf + output_len, + PAGE_SIZE - output_len, + "BOX: %hhu BAY: %hhu %s\n", + box, bay, active); + } + } else if (box != 0 && box != 0xFF) { + output_len += snprintf(buf +
output_len, + PAGE_SIZE - output_len, "BOX: %hhu %s\n", + box, active); + } else + output_len += snprintf(buf + output_len, + PAGE_SIZE - output_len, "%s\n", active); + } + + spin_unlock_irqrestore(&h->devlock, flags); + return output_len; +} + static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, + host_show_hp_ssd_smart_path_enabled, NULL); +static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); +static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, + host_show_hp_ssd_smart_path_status, + host_store_hp_ssd_smart_path_status); +static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, + host_store_raid_offload_debug); static DEVICE_ATTR(firmware_revision, S_IRUGO, host_show_firmware_revision, NULL); static DEVICE_ATTR(commands_outstanding, S_IRUGO, @@ -500,11 +858,15 @@ host_show_transport_mode, NULL); static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); +static DEVICE_ATTR(lockup_detected, S_IRUGO, + host_show_lockup_detected, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, &dev_attr_lunid, &dev_attr_unique_id, + &dev_attr_hp_ssd_smart_path_enabled, + &dev_attr_path_info, NULL, }; @@ -514,9 +876,15 @@ &dev_attr_commands_outstanding, &dev_attr_transport_mode, &dev_attr_resettable, + &dev_attr_hp_ssd_smart_path_status, + &dev_attr_raid_offload_debug, + &dev_attr_lockup_detected, NULL, }; +#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \ + HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS) + static struct scsi_host_template hpsa_driver_template = { .module = THIS_MODULE, .name = HPSA, @@ -531,6 +899,7 @@ .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, .slave_alloc = hpsa_slave_alloc, + .slave_configure = hpsa_slave_configure, .slave_destroy = hpsa_slave_destroy, #ifdef CONFIG_COMPAT .compat_ioctl = hpsa_compat_ioctl, @@ -541,18 +910,13 @@ .no_write_same = 1, }; - -/* Enqueuing and dequeuing functions for cmdlists. */ -static inline void addQ(struct list_head *list, struct CommandList *c) -{ - list_add_tail(&c->list, list); -} - static inline u32 next_command(struct ctlr_info *h, u8 q) { u32 a; - struct reply_pool *rq = &h->reply_queue[q]; - unsigned long flags; + struct reply_queue_buffer *rq = &h->reply_queue[q]; + + if (h->transMethod & CFGTBL_Trans_io_accel1) + return h->access.command_completed(h, q); if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h, q); @@ -560,9 +924,7 @@ if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { a = rq->head[rq->current_entry]; rq->current_entry++; - spin_lock_irqsave(&h->lock, flags); - h->commands_outstanding--; - spin_unlock_irqrestore(&h->lock, flags); + atomic_dec(&h->commands_outstanding); } else { a = FIFO_EMPTY; } @@ -574,20 +936,122 @@ return a; } -/* set_performant_mode: Modify the tag for cciss performant +/* + * There are some special bits in the bus address of the + * command that we have to set for the controller to know + * how to process the command: + * + * Normal performant mode: + * bit 0: 1 means performant mode, 0 means simple mode. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 0) + * + * ioaccel1 mode: + * bit 0 = "performant mode" bit. 
+ * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 110) + * (command type is needed because ioaccel1 mode + * commands are submitted through the same register as normal + * mode commands, so this is how the controller knows whether + * the command is normal mode or ioaccel1 mode.) + * + * ioaccel2 mode: + * bit 0 = "performant mode" bit. + * bits 1-4 = block fetch table entry (note extra bit) + * bits 4-6 = not needed, because ioaccel2 mode has + * a separate special register for submitting commands. + */ + +/* + * set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ -static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) +#define DEFAULT_REPLY_QUEUE (-1) +static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, + int reply_queue) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) { c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); - if (likely(h->msix_vector)) + if (unlikely(!h->msix_vector)) + return; + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) c->Header.ReplyQueue = - smp_processor_id() % h->nreply_queues; + raw_smp_processor_id() % h->nreply_queues; + else + c->Header.ReplyQueue = reply_queue % h->nreply_queues; } } +static void set_ioaccel1_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + + /* + * Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->ReplyQueue = smp_processor_id() % h->nreply_queues; + else + cp->ReplyQueue = reply_queue % h->nreply_queues; + /* + * Set the bits in the address sent down to include: + * - performant mode bit (bit 0) + * - pull count (bits 1-3) + * - command type (bits 4-6) + */ + c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | + IOACCEL1_BUSADDR_CMDTYPE; +} + +static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) + &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->reply_queue = smp_processor_id() % h->nreply_queues; + else + cp->reply_queue = reply_queue % h->nreply_queues; + /* Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ + c->busaddr |= h->ioaccel2_blockFetchTable[0]; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* + * Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. 
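+ * (For example, with h->nreply_queues = 8, a command prepared on CPU 11 has its completion posted to reply queue 11 % 8 = 3.)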
+ */ + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->reply_queue = smp_processor_id() % h->nreply_queues; + else + cp->reply_queue = reply_queue % h->nreply_queues; + /* + * Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ + c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); +} + static int is_firmware_flash_cmd(u8 *cdb) { return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; @@ -617,25 +1081,36 @@ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; } -static void enqueue_cmd_and_start_io(struct ctlr_info *h, - struct CommandList *c) +static void __enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c, int reply_queue) { - unsigned long flags; - - set_performant_mode(h, c); dial_down_lockup_detection_during_fw_flash(h, c); - spin_lock_irqsave(&h->lock, flags); - addQ(&h->reqQ, c); - h->Qdepth++; - spin_unlock_irqrestore(&h->lock, flags); - start_io(h); + atomic_inc(&h->commands_outstanding); + switch (c->cmd_type) { + case CMD_IOACCEL1: + set_ioaccel1_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + break; + case CMD_IOACCEL2: + set_ioaccel2_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + break; + case IOACCEL2_TMF: + set_ioaccel2_tmf_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + break; + default: + set_performant_mode(h, c, reply_queue); + h->access.submit_command(h, c); + } } -static inline void removeQ(struct CommandList *c) +static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) { - if (WARN_ON(list_empty(&c->list))) - return; - list_del_init(&c->list); + if (unlikely(hpsa_is_pending_event(c))) + return finish_cmd(c); + + __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); } static inline int is_hba_lunid(unsigned char scsi3addr[]) @@ -678,8 +1153,62 @@ return !found; } +static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, char *description) +{ +#define LABEL_SIZE 25 + char label[LABEL_SIZE]; + + if (h == NULL || h->pdev == NULL || h->scsi_host == NULL) + return; + + switch (dev->devtype) { + case TYPE_RAID: + snprintf(label, LABEL_SIZE, "controller"); + break; + case TYPE_ENCLOSURE: + snprintf(label, LABEL_SIZE, "enclosure"); + break; + case TYPE_DISK: + if (dev->external) + snprintf(label, LABEL_SIZE, "external"); + else if (!is_logical_dev_addr_mode(dev->scsi3addr)) + snprintf(label, LABEL_SIZE, "%s", + raid_label[PHYSICAL_DRIVE]); + else + snprintf(label, LABEL_SIZE, "RAID-%s", + dev->raid_level > RAID_UNKNOWN ? "?" : + raid_label[dev->raid_level]); + break; + case TYPE_ROM: + snprintf(label, LABEL_SIZE, "rom"); + break; + case TYPE_TAPE: + snprintf(label, LABEL_SIZE, "tape"); + break; + case TYPE_MEDIUM_CHANGER: + snprintf(label, LABEL_SIZE, "changer"); + break; + default: + snprintf(label, LABEL_SIZE, "UNKNOWN"); + break; + } + + dev_printk(level, &h->pdev->dev, + "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n", + h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + description, + scsi_device_type(dev->devtype), + dev->vendor, + dev->model, + label, + dev->offload_config ? '+' : '-', + dev->offload_enabled ? '+' : '-', + dev->expose_device); +} + /* Add an entry into h->dev[] array. 
*/ -static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, +static int hpsa_scsi_add_entry(struct ctlr_info *h, struct hpsa_scsi_dev_t *device, struct hpsa_scsi_dev_t *added[], int *nadded) { @@ -702,7 +1231,7 @@ /* If this device is a non-zero lun of a multi-lun device * byte 4 of the 8-byte LUN addr will contain the logical - * unit no, zero otherise. + * unit no, zero otherwise. */ if (device->scsi3addr[4] == 0) { /* This is not a non-zero lun of a multi-lun device */ @@ -714,17 +1243,19 @@ /* This is a non-zero lun of a multi-lun device. * Search through our list and find the device which - * has the same 8 byte LUN address, excepting byte 4. + * has the same 8 byte LUN address, excepting bytes 4 and 5. * Assign the same bus and target for this new LUN. * Use the logical unit number from the firmware. */ memcpy(addr1, device->scsi3addr, 8); addr1[4] = 0; + addr1[5] = 0; for (i = 0; i < n; i++) { sd = h->dev[i]; memcpy(addr2, sd->scsi3addr, 8); addr2[4] = 0; - /* differ only in byte 4? */ + addr2[5] = 0; + /* differ only in bytes 4 and 5? */ if (memcmp(addr1, addr2, 8) == 0) { device->bus = sd->bus; device->target = sd->target; @@ -745,34 +1276,63 @@ h->ndevices++; added[*nadded] = device; (*nadded)++; - - /* initially, (before registering with scsi layer) we don't - * know our hostno and we don't want to print anything first - * time anyway (the scsi layer's inquiries will show that info) - */ - /* if (hostno != -1) */ - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", - scsi_device_type(device->devtype), hostno, - device->bus, device->target, device->lun); + hpsa_show_dev_msg(KERN_INFO, h, device, + device->expose_device ? "added" : "masked"); + device->offload_to_be_enabled = device->offload_enabled; + device->offload_enabled = 0; return 0; } /* Update an entry in h->dev[] array. */ -static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, +static void hpsa_scsi_update_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *new_entry) { + int offload_enabled; /* assumes h->devlock is held */ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); /* Raid level changed. */ h->dev[entry]->raid_level = new_entry->raid_level; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", - scsi_device_type(new_entry->devtype), hostno, new_entry->bus, - new_entry->target, new_entry->lun); + + /* Raid offload parameters changed. Careful about the ordering. */ + if (new_entry->offload_config && new_entry->offload_enabled) { + /* + * If the drive is newly offload_enabled, we want to copy the + * raid map data first. If previously offload_enabled and + * offload_config were set, raid map data had better be + * the same as it was before. If the raid map data is changed + * then it had better be the case that + * h->dev[entry]->offload_enabled is currently 0. + */ + h->dev[entry]->raid_map = new_entry->raid_map; + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + } + if (new_entry->hba_ioaccel_enabled) { + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ + } + h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; + h->dev[entry]->offload_config = new_entry->offload_config; + h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; + h->dev[entry]->queue_depth = new_entry->queue_depth; + + /* + * We can turn off ioaccel offload now, but need to delay turning + * it on until we can update h->dev[entry]->phys_disk[], but we + * can't do that until all the devices are updated.
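+ * (Concretely, adjust_hpsa_scsi_table() copies offload_to_be_enabled into offload_enabled only after hpsa_update_log_drive_phys_drive_ptrs() has rebuilt phys_disk[] for every device.)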
+ */ + h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; + if (!new_entry->offload_enabled) + h->dev[entry]->offload_enabled = 0; + + offload_enabled = h->dev[entry]->offload_enabled; + h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; + hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); + h->dev[entry]->offload_enabled = offload_enabled; } /* Replace an entry from h->dev[] array. */ -static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, +static void hpsa_scsi_replace_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *new_entry, struct hpsa_scsi_dev_t *added[], int *nadded, struct hpsa_scsi_dev_t *removed[], int *nremoved) @@ -794,13 +1354,13 @@ h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", - scsi_device_type(new_entry->devtype), hostno, new_entry->bus, - new_entry->target, new_entry->lun); + hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); + new_entry->offload_to_be_enabled = new_entry->offload_enabled; + new_entry->offload_enabled = 0; } /* Remove an entry from h->dev[] array. */ -static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, +static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *removed[], int *nremoved) { /* assumes h->devlock is held */ @@ -816,9 +1376,7 @@ for (i = entry; i < h->ndevices-1; i++) h->dev[i] = h->dev[i+1]; h->ndevices--; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", - scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, - sd->lun); + hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); } #define SCSI3ADDR_EQ(a, b) ( \ @@ -886,6 +1444,13 @@ */ if (dev1->raid_level != dev2->raid_level) return 1; + if (dev1->offload_config != dev2->offload_config) + return 1; + if (dev1->offload_enabled != dev2->offload_enabled) + return 1; + if (!is_logical_dev_addr_mode(dev1->scsi3addr)) + if (dev1->queue_depth != dev2->queue_depth) + return 1; return 0; } @@ -906,6 +1471,9 @@ #define DEVICE_CHANGED 1 #define DEVICE_SAME 2 #define DEVICE_UPDATED 3 + if (needle == NULL) + return DEVICE_NOT_FOUND; + for (i = 0; i < haystack_size; i++) { if (haystack[i] == NULL) /* previously removed. */ continue; @@ -916,6 +1484,9 @@ return DEVICE_UPDATED; return DEVICE_SAME; } else { + /* Keep offline devices offline */ + if (needle->volume_offline) + return DEVICE_NOT_FOUND; return DEVICE_CHANGED; } } @@ -924,7 +1495,256 @@ return DEVICE_NOT_FOUND; } -static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, +static void hpsa_monitor_offline_device(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + struct offline_device_entry *device; + unsigned long flags; + + /* Check to see if device is already on the list */ + spin_lock_irqsave(&h->offline_device_lock, flags); + list_for_each_entry(device, &h->offline_device_list, offline_list) { + if (memcmp(device->scsi3addr, scsi3addr, + sizeof(device->scsi3addr)) == 0) { + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&h->offline_device_lock, flags); + + /* Device is not on the list, add it. 
*/ + device = kmalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); + return; + } + memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); + spin_lock_irqsave(&h->offline_device_lock, flags); + list_add_tail(&device->offline_list, &h->offline_device_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); +} + +/* Print a message explaining various offline volume states */ +static void hpsa_show_volume_status(struct ctlr_info *h, + struct hpsa_scsi_dev_t *sd) +{ + if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + switch (sd->volume_offline) { + case HPSA_LV_OK: + break; + case HPSA_LV_UNDERGOING_ERASE: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_NOT_AVAILABLE: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_RPI: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_RPI: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_ENCRYPTED_NO_KEY: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_ENCRYPTION: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_ENCRYPTION: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_ENCRYPTION_REKEYING: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + } +} + +/* + * Figure the list of physical drive pointers for a logical drive with + * raid offload configured. 
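+ * (Worked example of the arithmetic below: with row_cnt = 3, layout_map_count = 2, and 2 data + 0 metadata disks per row, nraid_map_entries = 3 * 2 * 2 = 12 and nphys_disk = 2 * 2 = 4.)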
+ */ +static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev[], int ndevices, + struct hpsa_scsi_dev_t *logical_drive) +{ + struct raid_map_data *map = &logical_drive->raid_map; + struct raid_map_disk_data *dd = &map->data[0]; + int i, j; + int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + + le16_to_cpu(map->metadata_disks_per_row); + int nraid_map_entries = le16_to_cpu(map->row_cnt) * + le16_to_cpu(map->layout_map_count) * + total_disks_per_row; + int nphys_disk = le16_to_cpu(map->layout_map_count) * + total_disks_per_row; + int qdepth; + + if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) + nraid_map_entries = RAID_MAP_MAX_ENTRIES; + + logical_drive->nphysical_disks = nraid_map_entries; + + qdepth = 0; + for (i = 0; i < nraid_map_entries; i++) { + logical_drive->phys_disk[i] = NULL; + if (!logical_drive->offload_config) + continue; + for (j = 0; j < ndevices; j++) { + if (dev[j] == NULL) + continue; + if (dev[j]->devtype != TYPE_DISK) + continue; + if (is_logical_device(dev[j])) + continue; + if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) + continue; + + logical_drive->phys_disk[i] = dev[j]; + if (i < nphys_disk) + qdepth = min(h->nr_cmds, qdepth + + logical_drive->phys_disk[i]->queue_depth); + break; + } + + /* + * This can happen if a physical drive is removed and + * the logical drive is degraded. In that case, the RAID + * map data will refer to a physical disk which isn't actually + * present. And in that case offload_enabled should already + * be 0, but we'll turn it off here just in case + */ + if (!logical_drive->phys_disk[i]) { + logical_drive->offload_enabled = 0; + logical_drive->offload_to_be_enabled = 0; + logical_drive->queue_depth = 8; + } + } + if (nraid_map_entries) + /* + * This is correct for reads, too high for full stripe writes, + * way too high for partial stripe writes + */ + logical_drive->queue_depth = qdepth; + else + logical_drive->queue_depth = h->nr_cmds; +} + +static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev[], int ndevices) +{ + int i; + + for (i = 0; i < ndevices; i++) { + if (dev[i] == NULL) + continue; + if (dev[i]->devtype != TYPE_DISK) + continue; + if (!is_logical_device(dev[i])) + continue; + + /* + * If offload is currently enabled, the RAID map and + * phys_disk[] assignment *better* not be changing + * and since it isn't changing, we do not need to + * update it. + */ + if (dev[i]->offload_enabled) + continue; + + hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); + } +} + +static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) +{ + int rc = 0; + + if (!h->scsi_host) + return 1; + + if (is_logical_device(device)) /* RAID */ + rc = scsi_add_device(h->scsi_host, device->bus, + device->target, device->lun); + else /* HBA */ + rc = hpsa_add_sas_device(h->sas_host, device); + + return rc; +} + +static void hpsa_remove_device(struct ctlr_info *h, + struct hpsa_scsi_dev_t *device) +{ + struct scsi_device *sdev = NULL; + + if (!h->scsi_host) + return; + + if (is_logical_device(device)) { /* RAID */ + sdev = scsi_device_lookup(h->scsi_host, device->bus, + device->target, device->lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + /* + * We don't expect to get here. Future commands + * to this device will get a selection timeout as + * if the device were gone. 
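+ * (Note: in the branch above, scsi_device_lookup() returns the sdev with a reference held, which is why scsi_remove_device() is paired with scsi_device_put().)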
+ */ + hpsa_show_dev_msg(KERN_WARNING, h, device, + "didn't find device for removal."); + } + } else /* HBA */ + hpsa_remove_sas_device(device); +} + +static void adjust_hpsa_scsi_table(struct ctlr_info *h, struct hpsa_scsi_dev_t *sd[], int nsds) { /* sd contains scsi3 addresses and devtypes, and inquiry @@ -936,7 +1756,15 @@ unsigned long flags; struct hpsa_scsi_dev_t **added, **removed; int nadded, nremoved; - struct Scsi_Host *sh = NULL; + + /* + * A reset can cause a device status to change + * re-schedule the scan to see what happened. + */ + if (h->reset_in_progress) { + h->drv_req_rescan = 1; + return; + } added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); @@ -964,19 +1792,18 @@ device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); if (device_change == DEVICE_NOT_FOUND) { changes++; - hpsa_scsi_remove_entry(h, hostno, i, - removed, &nremoved); + hpsa_scsi_remove_entry(h, i, removed, &nremoved); continue; /* remove ^^^, hence i not incremented */ } else if (device_change == DEVICE_CHANGED) { changes++; - hpsa_scsi_replace_entry(h, hostno, i, sd[entry], + hpsa_scsi_replace_entry(h, i, sd[entry], added, &nadded, removed, &nremoved); /* Set it to NULL to prevent it from being freed * at the bottom of hpsa_update_scsi_devices() */ sd[entry] = NULL; } else if (device_change == DEVICE_UPDATED) { - hpsa_scsi_update_entry(h, hostno, i, sd[entry]); + hpsa_scsi_update_entry(h, i, sd[entry]); } i++; } @@ -988,12 +1815,23 @@ for (i = 0; i < nsds; i++) { if (!sd[i]) /* if already added above. */ continue; + + /* Don't add devices which are NOT READY, FORMAT IN PROGRESS + * as the SCSI mid-layer does not handle such devices well. + * It relentlessly loops sending TUR at 3Hz, then READ(10) + * at 160Hz, and prevents the system from coming up. + */ + if (sd[i]->volume_offline) { + hpsa_show_volume_status(h, sd[i]); + hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); + continue; + } + device_change = hpsa_scsi_find_entry(sd[i], h->dev, h->ndevices, &entry); if (device_change == DEVICE_NOT_FOUND) { changes++; - if (hpsa_scsi_add_entry(h, hostno, sd[i], - added, &nadded) != 0) + if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) break; sd[i] = NULL; /* prevent from being freed later. */ } else if (device_change == DEVICE_CHANGED) { @@ -1004,49 +1842,65 @@ /* but if it does happen, we just ignore that device */ } } + hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); + + /* Now that h->dev[]->phys_disk[] is coherent, we can enable + * any logical drives that need it enabled. + */ + for (i = 0; i < h->ndevices; i++) { + if (h->dev[i] == NULL) + continue; + h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; + } + spin_unlock_irqrestore(&h->devlock, flags); + /* Monitor devices which are in one of several NOT READY states to be + * brought online later. This must be done without holding h->devlock, + * so don't touch h->dev[] + */ + for (i = 0; i < nsds; i++) { + if (!sd[i]) /* if already added above. */ + continue; + if (sd[i]->volume_offline) + hpsa_monitor_offline_device(h, sd[i]->scsi3addr); + } + /* Don't notify scsi mid layer of any changes the first time through * (or if there are no changes) scsi_scan_host will do it later the * first time through. 
*/ - if (hostno == -1 || !changes) + if (!changes) goto free_and_out; - sh = h->scsi_host; /* Notify scsi mid layer of any removed devices */ for (i = 0; i < nremoved; i++) { - struct scsi_device *sdev = - scsi_device_lookup(sh, removed[i]->bus, - removed[i]->target, removed[i]->lun); - if (sdev != NULL) { - scsi_remove_device(sdev); - scsi_device_put(sdev); - } else { - /* We don't expect to get here. - * future cmds to this device will get selection - * timeout as if the device was gone. - */ - dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " - " for removal.", hostno, removed[i]->bus, - removed[i]->target, removed[i]->lun); - } + if (removed[i] == NULL) + continue; + if (removed[i]->expose_device) + hpsa_remove_device(h, removed[i]); kfree(removed[i]); removed[i] = NULL; } /* Notify scsi mid layer of any added devices */ for (i = 0; i < nadded; i++) { - if (scsi_add_device(sh, added[i]->bus, - added[i]->target, added[i]->lun) == 0) + int rc = 0; + + if (added[i] == NULL) + continue; + if (!(added[i]->expose_device)) + continue; + rc = hpsa_add_device(h, added[i]); + if (!rc) continue; - dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " - "device not added.\n", hostno, added[i]->bus, - added[i]->target, added[i]->lun); + dev_warn(&h->pdev->dev, + "addition failed %d, device not added.", rc); /* now we have to remove it from h->dev, * since it didn't get added to scsi mid layer */ fixup_botched_add(h, added[i]); + h->drv_req_rescan = 1; } free_and_out: @@ -1055,7 +1909,7 @@ } /* - * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t * + * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * * Assumes h->devlock is held. */ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, @@ -1072,7 +1926,6 @@ return NULL; } -/* link sdev->hostdata to our per-device structure. */ static int hpsa_slave_alloc(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd; @@ -1081,19 +1934,95 @@ h = sdev_to_hba(sdev); spin_lock_irqsave(&h->devlock, flags); - sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), - sdev_id(sdev), sdev->lun); - if (sd != NULL) + if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { + struct scsi_target *starget; + struct sas_rphy *rphy; + + starget = scsi_target(sdev); + rphy = target_to_rphy(starget); + sd = hpsa_find_device_by_sas_rphy(h, rphy); + if (sd) { + sd->target = sdev_id(sdev); + sd->lun = sdev->lun; + } + } else + sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), + sdev_id(sdev), sdev->lun); + + if (sd && sd->expose_device) { + atomic_set(&sd->ioaccel_cmds_out, 0); sdev->hostdata = sd; + } else + sdev->hostdata = NULL; spin_unlock_irqrestore(&h->devlock, flags); return 0; } +/* configure scsi device based on internal per-device structure */ +static int hpsa_slave_configure(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *sd; + int queue_depth; + + sd = sdev->hostdata; + sdev->no_uld_attach = !sd || !sd->expose_device; + + if (sd) + queue_depth = sd->queue_depth != 0 ? + sd->queue_depth : sdev->host->can_queue; + else + queue_depth = sdev->host->can_queue; + + scsi_change_queue_depth(sdev, queue_depth); + + return 0; +} + static void hpsa_slave_destroy(struct scsi_device *sdev) { /* nothing to do.
*/ } +static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (!h->ioaccel2_cmd_sg_list) + return; + for (i = 0; i < h->nr_cmds; i++) { + kfree(h->ioaccel2_cmd_sg_list[i]); + h->ioaccel2_cmd_sg_list[i] = NULL; + } + kfree(h->ioaccel2_cmd_sg_list); + h->ioaccel2_cmd_sg_list = NULL; +} + +static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (h->chainsize <= 0) + return 0; + + h->ioaccel2_cmd_sg_list = + kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, + GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list) + return -ENOMEM; + for (i = 0; i < h->nr_cmds; i++) { + h->ioaccel2_cmd_sg_list[i] = + kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * + h->maxsgentries, GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list[i]) + goto clean; + } + return 0; + +clean: + hpsa_free_ioaccel2_sg_chain_blocks(h); + return -ENOMEM; +} + static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1108,7 +2037,7 @@ h->cmd_sg_list = NULL; } -static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) +static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1117,13 +2046,17 @@ h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, GFP_KERNEL); - if (!h->cmd_sg_list) + if (!h->cmd_sg_list) { + dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); return -ENOMEM; + } for (i = 0; i < h->nr_cmds; i++) { h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * h->chainsize, GFP_KERNEL); - if (!h->cmd_sg_list[i]) + if (!h->cmd_sg_list[i]) { + dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); goto clean; + } } return 0; @@ -1132,27 +2065,60 @@ return -ENOMEM; } +static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp, struct CommandList *c) +{ + struct ioaccel2_sg_element *chain_block; + u64 temp64; + u32 chain_size; + + chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; + chain_size = le32_to_cpu(cp->sg[0].length); + temp64 = pci_map_single(h->pdev, chain_block, chain_size, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64)) { + /* prevent subsequent unmapping */ + cp->sg->address = 0; + return -1; + } + cp->sg->address = cpu_to_le64(temp64); + return 0; +} + +static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp) +{ + struct ioaccel2_sg_element *chain_sg; + u64 temp64; + u32 chain_size; + + chain_sg = cp->sg; + temp64 = le64_to_cpu(chain_sg->address); + chain_size = le32_to_cpu(cp->sg[0].length); + pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); +} + static int hpsa_map_sg_chain_block(struct ctlr_info *h, struct CommandList *c) { struct SGDescriptor *chain_sg, *chain_block; u64 temp64; + u32 chain_len; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; chain_block = h->cmd_sg_list[c->cmdindex]; - chain_sg->Ext = HPSA_SG_CHAIN; - chain_sg->Len = sizeof(*chain_sg) * - (c->Header.SGTotal - h->max_cmd_sg_entries); - temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, + chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); + chain_len = sizeof(*chain_sg) * + (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); + chain_sg->Len = cpu_to_le32(chain_len); + temp64 = pci_map_single(h->pdev, chain_block, chain_len, PCI_DMA_TODEVICE); if (dma_mapping_error(&h->pdev->dev, temp64)) { /* prevent subsequent unmapping */ - chain_sg->Addr.lower = 0; - chain_sg->Addr.upper = 0; + chain_sg->Addr = cpu_to_le64(0); return -1; } - chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); - chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 
0x0FFFFFFFFULL); + chain_sg->Addr = cpu_to_le64(temp64); return 0; } @@ -1160,15 +2126,250 @@ struct CommandList *c) { struct SGDescriptor *chain_sg; - union u64bit temp64; - if (c->Header.SGTotal <= h->max_cmd_sg_entries) + if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) return; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; - temp64.val32.lower = chain_sg->Addr.lower; - temp64.val32.upper = chain_sg->Addr.upper; - pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); + pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), + le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); +} + + +/* Decode the various types of errors on ioaccel2 path. + * Return 1 for any error that should generate a RAID path retry. + * Return 0 for errors that don't require a RAID path retry. + */ +static int handle_ioaccel_mode2_error(struct ctlr_info *h, + struct CommandList *c, + struct scsi_cmnd *cmd, + struct io_accel2_cmd *c2) +{ + int data_len; + int retry = 0; + u32 ioaccel2_resid = 0; + + switch (c2->error_data.serv_response) { + case IOACCEL2_SERV_RESPONSE_COMPLETE: + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + break; + case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: + cmd->result |= SAM_STAT_CHECK_CONDITION; + if (c2->error_data.data_present != + IOACCEL2_SENSE_DATA_PRESENT) { + memset(cmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + break; + } + /* copy the sense data */ + data_len = c2->error_data.sense_data_len; + if (data_len > SCSI_SENSE_BUFFERSIZE) + data_len = SCSI_SENSE_BUFFERSIZE; + if (data_len > sizeof(c2->error_data.sense_data_buff)) + data_len = + sizeof(c2->error_data.sense_data_buff); + memcpy(cmd->sense_buffer, + c2->error_data.sense_data_buff, data_len); + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: + retry = 1; + break; + default: + retry = 1; + break; + } + break; + case IOACCEL2_SERV_RESPONSE_FAILURE: + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_IO_ERROR: + case IOACCEL2_STATUS_SR_IO_ABORTED: + case IOACCEL2_STATUS_SR_OVERRUN: + retry = 1; + break; + case IOACCEL2_STATUS_SR_UNDERRUN: + cmd->result = (DID_OK << 16); /* host byte */ + cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ + ioaccel2_resid = get_unaligned_le32( + &c2->error_data.resid_cnt[0]); + scsi_set_resid(cmd, ioaccel2_resid); + break; + case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: + case IOACCEL2_STATUS_SR_INVALID_DEVICE: + case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: + /* We will get an event from ctlr to trigger rescan */ + retry = 1; + break; + default: + retry = 1; + } + break; + case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: + break; + case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: + break; + case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: + retry = 1; + break; + case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: + break; + default: + retry = 1; + break; + } + + return retry; /* retry on raid path? */ +} + +static void hpsa_cmd_resolve_events(struct ctlr_info *h, + struct CommandList *c) +{ + bool do_wake = false; + + /* + * Prevent the following race in the abort handler: + * + * 1. LLD is requested to abort a SCSI command + * 2. The SCSI command completes + * 3. The struct CommandList associated with step 2 is made available + * 4. New I/O request to LLD to another LUN re-uses struct CommandList + * 5. 
Abort handler follows scsi_cmnd->host_scribble and + * finds struct CommandList and tries to aborts it + * Now we have aborted the wrong command. + * + * Reset c->scsi_cmd here so that the abort or reset handler will know + * this command has completed. Then, check to see if the handler is + * waiting for this command, and, if so, wake it. + */ + c->scsi_cmd = SCSI_CMD_IDLE; + mb(); /* Declare command idle before checking for pending events. */ + if (c->abort_pending) { + do_wake = true; + c->abort_pending = false; + } + if (c->reset_pending) { + unsigned long flags; + struct hpsa_scsi_dev_t *dev; + + /* + * There appears to be a reset pending; lock the lock and + * reconfirm. If so, then decrement the count of outstanding + * commands and wake the reset command if this is the last one. + */ + spin_lock_irqsave(&h->lock, flags); + dev = c->reset_pending; /* Re-fetch under the lock. */ + if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) + do_wake = true; + c->reset_pending = NULL; + spin_unlock_irqrestore(&h->lock, flags); + } + + if (do_wake) + wake_up_all(&h->event_sync_wait_queue); +} + +static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, + struct CommandList *c) +{ + hpsa_cmd_resolve_events(h, c); + cmd_tagged_free(h, c); +} + +static void hpsa_cmd_free_and_done(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd) +{ + hpsa_cmd_resolve_and_free(h, c); + cmd->scsi_done(cmd); +} + +static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) +{ + INIT_WORK(&c->work, hpsa_command_resubmit_worker); + queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); +} + +static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd) +{ + cmd->result = DID_ABORT << 16; +} + +static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, + struct scsi_cmnd *cmd) +{ + hpsa_set_scsi_cmd_aborted(cmd); + dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", + c->Request.CDB, c->err_info->ScsiStatus); + hpsa_cmd_resolve_and_free(h, c); +} + +static void process_ioaccel2_completion(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + struct hpsa_scsi_dev_t *dev) +{ + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* check for good status */ + if (likely(c2->error_data.serv_response == 0 && + c2->error_data.status == 0)) + return hpsa_cmd_free_and_done(h, c, cmd); + + /* + * Any RAID offload error results in retry which will use + * the normal I/O path so the controller can handle whatever's + * wrong. + */ + if (is_logical_device(dev) && + c2->error_data.serv_response == + IOACCEL2_SERV_RESPONSE_FAILURE) { + if (c2->error_data.status == + IOACCEL2_STATUS_SR_IOACCEL_DISABLED) + dev->offload_enabled = 0; + + return hpsa_retry_cmd(h, c); + } + + if (handle_ioaccel_mode2_error(h, c, cmd, c2)) + return hpsa_retry_cmd(h, c); + + return hpsa_cmd_free_and_done(h, c, cmd); +} + +/* Returns 0 on success, < 0 otherwise. */ +static int hpsa_evaluate_tmf_status(struct ctlr_info *h, + struct CommandList *cp) +{ + u8 tmf_status = cp->err_info->ScsiStatus; + + switch (tmf_status) { + case CISS_TMF_COMPLETE: + /* + * CISS_TMF_COMPLETE never happens, instead, + * ei->CommandStatus == 0 for this case. 
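hpsa_evaluate_tmf_status() collapses the TMF status byte into zero on success and the negated status on any failure, so callers can treat it like an ordinary error code. A userspace sketch of that mapping (the TMF_* values below are invented stand-ins; the real CISS_TMF_* codes live in hpsa_cmd.h):

#include <stdio.h>

/* Illustrative stand-ins for the CISS_TMF_* codes; the real values
 * are defined in the driver headers. */
enum { TMF_COMPLETE = 0x00, TMF_SUCCESS = 0x01,
       TMF_REJECTED = 0x04, TMF_WRONG_LUN = 0x05 };

/* 0 on success, negated status otherwise, mirroring the driver. */
static int evaluate_tmf_status(unsigned char tmf_status)
{
    switch (tmf_status) {
    case TMF_COMPLETE:  /* normally shows up as CommandStatus == 0 */
    case TMF_SUCCESS:
        return 0;
    default:
        return -tmf_status;
    }
}

int main(void)
{
    printf("%d %d %d\n", evaluate_tmf_status(TMF_SUCCESS),
           evaluate_tmf_status(TMF_REJECTED),
           evaluate_tmf_status(TMF_WRONG_LUN)); /* 0 -4 -5 */
    return 0;
}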
+ */ + case CISS_TMF_SUCCESS: + return 0; + case CISS_TMF_INVALID_FRAME: + case CISS_TMF_NOT_SUPPORTED: + case CISS_TMF_FAILED: + case CISS_TMF_WRONG_LUN: + case CISS_TMF_OVERLAPPED_TAG: + break; + default: + dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", + tmf_status); + break; + } + return -tmf_status; } static void complete_scsi_command(struct CommandList *cp) @@ -1176,121 +2377,107 @@ struct scsi_cmnd *cmd; struct ctlr_info *h; struct ErrorInfo *ei; + struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; - unsigned char sense_key; - unsigned char asc; /* additional sense code */ - unsigned char ascq; /* additional sense code qualifier */ + u8 sense_key; + u8 asc; /* additional sense code */ + u8 ascq; /* additional sense code qualifier */ unsigned long sense_data_size; ei = cp->err_info; - cmd = (struct scsi_cmnd *) cp->scsi_cmd; + cmd = cp->scsi_cmd; h = cp->h; + dev = cmd->device->hostdata; + c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; scsi_dma_unmap(cmd); /* undo the DMA mappings */ - if (cp->Header.SGTotal > h->max_cmd_sg_entries) + if ((cp->cmd_type == CMD_SCSI) && + (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) hpsa_unmap_sg_chain_block(h, cp); + if ((cp->cmd_type == CMD_IOACCEL2) && + (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) + hpsa_unmap_ioaccel2_sg_chain_block(h, c2); + cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ - cmd->result |= ei->ScsiStatus; - /* copy the sense data whether we need to or not. */ - if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) - sense_data_size = SCSI_SENSE_BUFFERSIZE; - else - sense_data_size = sizeof(ei->SenseInfo); - if (ei->SenseLen < sense_data_size) - sense_data_size = ei->SenseLen; + if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) + atomic_dec(&cp->phys_disk->ioaccel_cmds_out); + + /* + * We check for lockup status here as it may be set for + * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by + * fail_all_oustanding_cmds() + */ + if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { + /* DID_NO_CONNECT will prevent a retry */ + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } + + if ((unlikely(hpsa_is_pending_event(cp)))) { + if (cp->reset_pending) + return hpsa_cmd_resolve_and_free(h, cp); + if (cp->abort_pending) + return hpsa_cmd_abort_and_free(h, cp, cmd); + } + + if (cp->cmd_type == CMD_IOACCEL2) + return process_ioaccel2_completion(h, cp, cmd, dev); - memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); scsi_set_resid(cmd, ei->ResidualCnt); + if (ei->CommandStatus == 0) + return hpsa_cmd_free_and_done(h, cp, cmd); - if (ei->CommandStatus == 0) { - cmd_free(h, cp); - cmd->scsi_done(cmd); - return; + /* For I/O accelerator commands, copy over some fields to the normal + * CISS header used below for error handling. + */ + if (cp->cmd_type == CMD_IOACCEL1) { + struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; + cp->Header.SGList = scsi_sg_count(cmd); + cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); + cp->Request.CDBLen = le16_to_cpu(c->io_flags) & + IOACCEL1_IOFLAGS_CDBLEN_MASK; + cp->Header.tag = c->tag; + memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); + memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); + + /* Any RAID offload error results in retry which will use + * the normal I/O path so the controller can handle whatever's + * wrong. 
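Both accelerator paths funnel offload errors the same way: a clean status completes immediately, a reported offload failure on a logical volume disables the fast path and resubmits on the RAID path, and everything else goes through the detailed decoder. A hedged decision-table sketch (the enum codes below are placeholders, not the driver's IOACCEL2_* values):

#include <stdbool.h>
#include <stdio.h>

enum outcome { DONE, RETRY_RAID_PATH };

/* Placeholder codes; the real names are IOACCEL2_SERV_RESPONSE_* and
 * IOACCEL2_STATUS_SR_* in the driver headers, with other values. */
enum { SERV_OK = 0, SERV_FAILURE = 1 };
enum { STAT_OK = 0, STAT_IOACCEL_DISABLED = 0x0e };

static enum outcome offload_outcome(bool is_logical, int serv, int stat,
                                    bool *disable_offload)
{
    *disable_offload = false;
    if (serv == SERV_OK && stat == STAT_OK)
        return DONE;                 /* clean completion */
    if (is_logical && serv == SERV_FAILURE) {
        if (stat == STAT_IOACCEL_DISABLED)
            *disable_offload = true; /* stop using the fast path */
        return RETRY_RAID_PATH;      /* let the controller handle it */
    }
    /* anything else is handed to the detailed decoder
     * (handle_ioaccel_mode2_error), which picks retry or done
     * per status; not modeled in this sketch */
    return DONE;
}

int main(void)
{
    bool off;
    enum outcome o = offload_outcome(true, SERV_FAILURE,
                                     STAT_IOACCEL_DISABLED, &off);
    printf("retry on RAID path: %d, offload disabled: %d\n",
           o == RETRY_RAID_PATH, off);
    return 0;
}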
+ */ + if (is_logical_device(dev)) { + if (ei->CommandStatus == CMD_IOACCEL_DISABLED) + dev->offload_enabled = 0; + return hpsa_retry_cmd(h, cp); + } } /* an error has occurred */ switch (ei->CommandStatus) { case CMD_TARGET_STATUS: - if (ei->ScsiStatus) { - /* Get sense key */ - sense_key = 0xf & ei->SenseInfo[2]; - /* Get additional sense code */ - asc = ei->SenseInfo[12]; - /* Get addition sense code qualifier */ - ascq = ei->SenseInfo[13]; - } - + cmd->result |= ei->ScsiStatus; + /* copy the sense data */ + if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) + sense_data_size = SCSI_SENSE_BUFFERSIZE; + else + sense_data_size = sizeof(ei->SenseInfo); + if (ei->SenseLen < sense_data_size) + sense_data_size = ei->SenseLen; + memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); + if (ei->ScsiStatus) + decode_sense_data(ei->SenseInfo, sense_data_size, + &sense_key, &asc, &ascq); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { - if (check_for_unit_attention(h, cp)) { - cmd->result = DID_SOFT_ERROR << 16; - break; - } - if (sense_key == ILLEGAL_REQUEST) { - /* - * SCSI REPORT_LUNS is commonly unsupported on - * Smart Array. Suppress noisy complaint. - */ - if (cp->Request.CDB[0] == REPORT_LUNS) - break; - - /* If ASC/ASCQ indicate Logical Unit - * Not Supported condition, - */ - if ((asc == 0x25) && (ascq == 0x0)) { - dev_warn(&h->pdev->dev, "cp %p " - "has check condition\n", cp); - break; - } - } - - if (sense_key == NOT_READY) { - /* If Sense is Not Ready, Logical Unit - * Not ready, Manual Intervention - * required - */ - if ((asc == 0x04) && (ascq == 0x03)) { - dev_warn(&h->pdev->dev, "cp %p " - "has check condition: unit " - "not ready, manual " - "intervention required\n", cp); - break; - } - } if (sense_key == ABORTED_COMMAND) { - /* Aborted command is retryable */ - dev_warn(&h->pdev->dev, "cp %p " - "has check condition: aborted command: " - "ASC: 0x%x, ASCQ: 0x%x\n", - cp, asc, ascq); cmd->result |= DID_SOFT_ERROR << 16; break; } - /* Must be some other type of check condition */ - dev_dbg(&h->pdev->dev, "cp %p has check condition: " - "unknown type: " - "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " - "Returning result: 0x%x, " - "cmd=[%02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x]\n", - cp, sense_key, asc, ascq, - cmd->result, - cmd->cmnd[0], cmd->cmnd[1], - cmd->cmnd[2], cmd->cmnd[3], - cmd->cmnd[4], cmd->cmnd[5], - cmd->cmnd[6], cmd->cmnd[7], - cmd->cmnd[8], cmd->cmnd[9], - cmd->cmnd[10], cmd->cmnd[11], - cmd->cmnd[12], cmd->cmnd[13], - cmd->cmnd[14], cmd->cmnd[15]); break; } - - /* Problem was not a check condition * Pass it up to the upper layers... */ @@ -1324,9 +2511,8 @@ case CMD_DATA_UNDERRUN: /* let mid layer handle it. 
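The CMD_TARGET_STATUS handling above relies on the fixed-format sense layout: sense key in the low nibble of byte 2, ASC in byte 12, ASCQ in byte 13. Those are the offsets the driver's older open-coded path used, and decode_sense_data() is assumed to cover at least this format. A runnable decode of a sample buffer:

#include <stdio.h>

/* Fixed-format SCSI sense data: key in byte 2 (low nibble),
 * ASC in byte 12, ASCQ in byte 13. */
static void decode_fixed_sense(const unsigned char *s, int len,
                               unsigned char *key, unsigned char *asc,
                               unsigned char *ascq)
{
    *key = *asc = *ascq = 0;
    if (len > 2)
        *key = s[2] & 0x0f;
    if (len > 12)
        *asc = s[12];
    if (len > 13)
        *ascq = s[13];
}

int main(void)
{
    /* NOT READY; LUN not ready, manual intervention required */
    unsigned char sense[18] = { 0x70, 0, 0x02, [12] = 0x04, [13] = 0x03 };
    unsigned char key, asc, ascq;

    decode_fixed_sense(sense, sizeof(sense), &key, &asc, &ascq);
    printf("key=%#x asc=%#x ascq=%#x\n", key, asc, ascq);
    return 0;
}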
*/ break; case CMD_DATA_OVERRUN: - dev_warn(&h->pdev->dev, "cp %p has" - " completed with data overrun " - "reported\n", cp); + dev_warn(&h->pdev->dev, + "CDB %16phN data overrun\n", cp->Request.CDB); break; case CMD_INVALID: { /* print_bytes(cp, sizeof(*cp), 1, 0); @@ -1342,60 +2528,71 @@ break; case CMD_PROTOCOL_ERR: cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, "cp %p has " - "protocol error\n", cp); + dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", + cp->Request.CDB); break; case CMD_HARDWARE_ERR: cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); + dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", + cp->Request.CDB); break; case CMD_CONNECTION_LOST: cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); + dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", + cp->Request.CDB); break; case CMD_ABORTED: - cmd->result = DID_ABORT << 16; - dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", - cp, ei->ScsiStatus); - break; + /* Return now to avoid calling scsi_done(). */ + return hpsa_cmd_abort_and_free(h, cp, cmd); case CMD_ABORT_FAILED: cmd->result = DID_ERROR << 16; - dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); + dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", + cp->Request.CDB); break; case CMD_UNSOLICITED_ABORT: cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ - dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " - "abort\n", cp); + dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", + cp->Request.CDB); break; case CMD_TIMEOUT: cmd->result = DID_TIME_OUT << 16; - dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); + dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", + cp->Request.CDB); break; case CMD_UNABORTABLE: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "Command unabortable\n"); break; + case CMD_TMF_STATUS: + if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ + cmd->result = DID_ERROR << 16; + break; + case CMD_IOACCEL_DISABLED: + /* This only handles the direct pass-through case since RAID + * offload is handled above. Just attempt a retry. 
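All of the DID_* assignments in this switch write the host byte of the SCSI result word, which packs status, message, host, and driver bytes into one 32-bit value (status | msg << 8 | host << 16 | driver << 24). A small demonstration of the packing; the constants below match their usual scsi.h definitions:

#include <stdio.h>

#define DID_OK                   0x00
#define DID_SOFT_ERROR           0x0b
#define COMMAND_COMPLETE         0x00
#define SAM_STAT_CHECK_CONDITION 0x02

/* The driver only ever writes the host byte and the status byte;
 * the driver byte (bits 24..31) is left zero here. */
static unsigned int make_result(unsigned int host, unsigned int msg,
                                unsigned int status)
{
    return (host << 16) | (msg << 8) | status;
}

int main(void)
{
    unsigned int good = make_result(DID_OK, COMMAND_COMPLETE, 0);
    unsigned int cc = make_result(DID_OK, COMMAND_COMPLETE,
                                  SAM_STAT_CHECK_CONDITION);
    unsigned int retry = make_result(DID_SOFT_ERROR, 0, 0);

    printf("good=%#010x check-condition=%#010x\n", good, cc);
    printf("retry host byte=%#x\n", (retry >> 16) & 0xff); /* 0xb */
    return 0;
}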
+ */ + cmd->result = DID_SOFT_ERROR << 16; + dev_warn(&h->pdev->dev, + "cp %p had HP SSD Smart Path error\n", cp); + break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } - cmd_free(h, cp); - cmd->scsi_done(cmd); + + return hpsa_cmd_free_and_done(h, cp, cmd); } static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, int sg_used, int data_direction) { int i; - union u64bit addr64; - for (i = 0; i < sg_used; i++) { - addr64.val32.lower = c->SG[i].Addr.lower; - addr64.val32.upper = c->SG[i].Addr.upper; - pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, - data_direction); - } + for (i = 0; i < sg_used; i++) + pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), + le32_to_cpu(c->SG[i].Len), + data_direction); } static int hpsa_map_one(struct pci_dev *pdev, @@ -1408,62 +2605,82 @@ if (buflen == 0 || data_direction == PCI_DMA_NONE) { cp->Header.SGList = 0; - cp->Header.SGTotal = 0; + cp->Header.SGTotal = cpu_to_le16(0); return 0; } - addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); + addr64 = pci_map_single(pdev, buf, buflen, data_direction); if (dma_mapping_error(&pdev->dev, addr64)) { /* Prevent subsequent unmap of something never mapped */ cp->Header.SGList = 0; - cp->Header.SGTotal = 0; + cp->Header.SGTotal = cpu_to_le16(0); return -1; } - cp->SG[0].Addr.lower = - (u32) (addr64 & (u64) 0x00000000FFFFFFFF); - cp->SG[0].Addr.upper = - (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); - cp->SG[0].Len = buflen; - cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ - cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ + cp->SG[0].Addr = cpu_to_le64(addr64); + cp->SG[0].Len = cpu_to_le32(buflen); + cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ + cp->Header.SGList = 1; /* no. SGs contig in this cmd */ + cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ return 0; } -static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, - struct CommandList *c) +#define NO_TIMEOUT ((unsigned long) -1) +#define DEFAULT_TIMEOUT 30000 /* milliseconds */ +static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, + struct CommandList *c, int reply_queue, unsigned long timeout_msecs) { DECLARE_COMPLETION_ONSTACK(wait); c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); + __enqueue_cmd_and_start_io(h, c, reply_queue); + if (timeout_msecs == NO_TIMEOUT) { + /* TODO: get rid of this no-timeout thing */ + wait_for_completion_io(&wait); + return IO_OK; + } + if (!wait_for_completion_io_timeout(&wait, + msecs_to_jiffies(timeout_msecs))) { + dev_warn(&h->pdev->dev, "Command timed out.\n"); + return -ETIMEDOUT; + } + return IO_OK; } -static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, - struct CommandList *c) +static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, + int reply_queue, unsigned long timeout_msecs) { - unsigned long flags; - - /* If controller lockup detected, fake a hardware error. 
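hpsa_map_one() above now stores the DMA address as a single little-endian 64-bit field where the old code split it into lower/upper 32-bit halves; the two encodings produce identical bytes, which is why the conversion is mechanical. A quick standalone check (the address is arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t addr = 0x0000001234abcd00ULL; /* hypothetical DMA address */
    uint8_t le_one[8], le_split[8];
    uint32_t lo = (uint32_t)(addr & 0xffffffffULL);
    uint32_t hi = (uint32_t)(addr >> 32);
    int i;

    for (i = 0; i < 8; i++)  /* cpu_to_le64 by hand */
        le_one[i] = (uint8_t)(addr >> (8 * i));
    for (i = 0; i < 4; i++) { /* old lower/upper split, each LE */
        le_split[i]     = (uint8_t)(lo >> (8 * i));
        le_split[i + 4] = (uint8_t)(hi >> (8 * i));
    }
    printf("same bytes: %s\n",
           memcmp(le_one, le_split, 8) == 0 ? "yes" : "no");
    return 0;
}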
*/ - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); - c->err_info->CommandStatus = CMD_HARDWARE_ERR; - } else { - spin_unlock_irqrestore(&h->lock, flags); - hpsa_scsi_do_simple_cmd_core(h, c); + if (unlikely(lockup_detected(h))) { + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; + return IO_OK; } + return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); +} + +static u32 lockup_detected(struct ctlr_info *h) +{ + int cpu; + u32 rc, *lockup_detected; + + cpu = get_cpu(); + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + rc = *lockup_detected; + put_cpu(); + return rc; } #define MAX_DRIVER_CMD_RETRIES 25 -static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, - struct CommandList *c, int data_direction) +static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, + struct CommandList *c, int data_direction, unsigned long timeout_msecs) { int backoff_time = 10, retry_count = 0; + int rc; do { memset(c->err_info, 0, sizeof(*c->err_info)); - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + timeout_msecs); + if (rc) + break; retry_count++; if (retry_count > 3) { msleep(backoff_time); @@ -1474,19 +2691,49 @@ check_for_busy(h, c)) && retry_count <= MAX_DRIVER_CMD_RETRIES); hpsa_pci_unmap(h->pdev, c, 1, data_direction); + if (retry_count > MAX_DRIVER_CMD_RETRIES) + rc = -EIO; + return rc; } -static void hpsa_scsi_interpret_error(struct CommandList *cp) +static void hpsa_print_cmd(struct ctlr_info *h, char *txt, + struct CommandList *c) { - struct ErrorInfo *ei; + const u8 *cdb = c->Request.CDB; + const u8 *lun = c->Header.LUN.LunAddrBytes; + + dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" + " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + txt, lun[0], lun[1], lun[2], lun[3], + lun[4], lun[5], lun[6], lun[7], + cdb[0], cdb[1], cdb[2], cdb[3], + cdb[4], cdb[5], cdb[6], cdb[7], + cdb[8], cdb[9], cdb[10], cdb[11], + cdb[12], cdb[13], cdb[14], cdb[15]); +} + +static void hpsa_scsi_interpret_error(struct ctlr_info *h, + struct CommandList *cp) +{ + const struct ErrorInfo *ei = cp->err_info; struct device *d = &cp->h->pdev->dev; + u8 sense_key, asc, ascq; + int sense_len; - ei = cp->err_info; switch (ei->CommandStatus) { case CMD_TARGET_STATUS: - dev_warn(d, "cmd %p has completed with errors\n", cp); - dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, - ei->ScsiStatus); + if (ei->SenseLen > sizeof(ei->SenseInfo)) + sense_len = sizeof(ei->SenseInfo); + else + sense_len = ei->SenseLen; + decode_sense_data(ei->SenseInfo, sense_len, + &sense_key, &asc, &ascq); + hpsa_print_cmd(h, "SCSI status", cp); + if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) + dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", + sense_key, asc, ascq); + else + dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); if (ei->ScsiStatus == 0) dev_warn(d, "SCSI status is abnormally zero. " "(probably indicates selection timeout " @@ -1494,108 +2741,225 @@ "firmware bug, circa July, 2001.)\n"); break; case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ - dev_info(d, "UNDERRUN\n"); break; case CMD_DATA_OVERRUN: - dev_warn(d, "cp %p has completed with data overrun\n", cp); + hpsa_print_cmd(h, "overrun condition", cp); break; case CMD_INVALID: { /* controller unfortunately reports SCSI passthru's * to non-existent targets as invalid commands. 
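hpsa_scsi_do_simple_cmd_with_retry() above retries transient conditions (unit attention, busy) with an immediate-then-backoff policy: the first few retries are immediate, later ones sleep before reissuing, bounded at 25 attempts. A userspace sketch of the loop's shape; the doubling growth of the backoff is an assumption, since that update falls outside the quoted hunk:

#include <stdio.h>

#define MAX_DRIVER_CMD_RETRIES 25

/* Stand-in for the real round trip: returns nonzero while the
 * target keeps reporting a transient ("try again") condition. */
static int send_cmd(int *transient_for)
{
    return (*transient_for)-- > 0;
}

int main(void)
{
    int backoff_ms = 10, retry_count = 0;
    int transient_for = 6; /* hypothetical: busy six times */

    while (send_cmd(&transient_for) &&
           retry_count <= MAX_DRIVER_CMD_RETRIES) {
        retry_count++;
        if (retry_count > 3) {
            printf("sleep %d ms before retry %d\n",
                   backoff_ms, retry_count);
            backoff_ms *= 2; /* assumed growth policy */
        }
    }
    printf("%s after %d retries\n",
           retry_count > MAX_DRIVER_CMD_RETRIES ? "gave up" : "done",
           retry_count);
    return 0;
}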
*/ - dev_warn(d, "cp %p is reported invalid (probably means " - "target device no longer present)\n", cp); - /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); - print_cmd(cp); */ + hpsa_print_cmd(h, "invalid command", cp); + dev_warn(d, "probably means device no longer present\n"); } break; case CMD_PROTOCOL_ERR: - dev_warn(d, "cp %p has protocol error \n", cp); + hpsa_print_cmd(h, "protocol error", cp); break; case CMD_HARDWARE_ERR: - /* cmd->result = DID_ERROR << 16; */ - dev_warn(d, "cp %p had hardware error\n", cp); + hpsa_print_cmd(h, "hardware error", cp); break; case CMD_CONNECTION_LOST: - dev_warn(d, "cp %p had connection lost\n", cp); + hpsa_print_cmd(h, "connection lost", cp); break; case CMD_ABORTED: - dev_warn(d, "cp %p was aborted\n", cp); + hpsa_print_cmd(h, "aborted", cp); break; case CMD_ABORT_FAILED: - dev_warn(d, "cp %p reports abort failed\n", cp); + hpsa_print_cmd(h, "abort failed", cp); break; case CMD_UNSOLICITED_ABORT: - dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); + hpsa_print_cmd(h, "unsolicited abort", cp); break; case CMD_TIMEOUT: - dev_warn(d, "cp %p timed out\n", cp); + hpsa_print_cmd(h, "timed out", cp); break; case CMD_UNABORTABLE: - dev_warn(d, "Command unabortable\n"); + hpsa_print_cmd(h, "unabortable", cp); + break; + case CMD_CTLR_LOCKUP: + hpsa_print_cmd(h, "controller lockup detected", cp); break; default: - dev_warn(d, "cp %p returned unknown status %x\n", cp, + hpsa_print_cmd(h, "unknown status", cp); + dev_warn(d, "Unknown command status %x\n", ei->CommandStatus); } } static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, - unsigned char page, unsigned char *buf, + u16 page, unsigned char *buf, unsigned char bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; - c = cmd_special_alloc(h); - - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); - return -ENOMEM; - } + c = cmd_alloc(h); if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { - hpsa_scsi_interpret_error(c); + hpsa_scsi_interpret_error(h, c); rc = -1; } out: - cmd_special_free(h, c); + cmd_free(h, c); return rc; } -static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) +static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, + u8 reset_type, int reply_queue) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; - c = cmd_special_alloc(h); + c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); - return -ENOMEM; - } /* fill_cmd can't fail here, no data buffer to map. */ - (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, - NULL, 0, 0, scsi3addr, TYPE_MSG); - hpsa_scsi_do_simple_cmd_core(h, c); + (void) fill_cmd(c, reset_type, h, NULL, 0, 0, + scsi3addr, TYPE_MSG); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to send reset command\n"); + goto out; + } /* no unmap needed here because no data xfer. 
*/ ei = c->err_info; if (ei->CommandStatus != 0) { - hpsa_scsi_interpret_error(c); + hpsa_scsi_interpret_error(h, c); rc = -1; } - cmd_special_free(h, c); +out: + cmd_free(h, c); + return rc; +} + +static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, + struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr) +{ + int i; + bool match = false; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + + if (hpsa_is_cmd_idle(c)) + return false; + + switch (c->cmd_type) { + case CMD_SCSI: + case CMD_IOCTL_PEND: + match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, + sizeof(c->Header.LUN.LunAddrBytes)); + break; + + case CMD_IOACCEL1: + case CMD_IOACCEL2: + if (c->phys_disk == dev) { + /* HBA mode match */ + match = true; + } else { + /* Possible RAID mode -- check each phys dev. */ + /* FIXME: Do we need to take out a lock here? If + * so, we could just call hpsa_get_pdisk_of_ioaccel2() + * instead. */ + for (i = 0; i < dev->nphysical_disks && !match; i++) { + /* FIXME: an alternate test might be + * + * match = dev->phys_disk[i]->ioaccel_handle + * == c2->scsi_nexus; */ + match = dev->phys_disk[i] == c->phys_disk; + } + } + break; + + case IOACCEL2_TMF: + for (i = 0; i < dev->nphysical_disks && !match; i++) { + match = dev->phys_disk[i]->ioaccel_handle == + le32_to_cpu(ac->it_nexus); + } + break; + + case 0: /* The command is in the middle of being initialized. */ + match = false; + break; + + default: + dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", + c->cmd_type); + BUG(); + } + + return match; +} + +static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr, u8 reset_type, int reply_queue) +{ + int i; + int rc = 0; + + /* We can really only handle one reset at a time */ + if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { + dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); + return -EINTR; + } + + BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + int refcount = atomic_inc_return(&c->refcount); + + if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { + unsigned long flags; + + /* + * Mark the target command as having a reset pending, + * then lock a lock so that the command cannot complete + * while we're considering it. If the command is not + * idle then count it; otherwise revoke the event. 
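The pool walk in hpsa_do_reset() above uses the command refcount to separate free slots from in-flight commands: bumping the count and seeing a result greater than one means another holder exists, so the command must be tracked by the reset. A C11 sketch of that claim-if-active test (pool layout and the device-match check are omitted):

#include <stdatomic.h>
#include <stdio.h>

/* In the driver, a free pool slot sits at refcount 0 and an
 * allocated command at 1, so inc-and-test > 1 means "in flight". */
struct cmd { atomic_int refcount; };

static int claim_if_active(struct cmd *c)
{
    int ref = atomic_fetch_add(&c->refcount, 1) + 1;
    int active = ref > 1;

    /* the caller always drops its pin again (cmd_free above) */
    atomic_fetch_sub(&c->refcount, 1);
    return active;
}

int main(void)
{
    struct cmd free_slot = { .refcount = 0 };
    struct cmd in_flight = { .refcount = 1 };

    printf("free slot active? %d\n", claim_if_active(&free_slot));
    printf("in-flight active? %d\n", claim_if_active(&in_flight));
    return 0;
}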
+ */ + c->reset_pending = dev; + spin_lock_irqsave(&h->lock, flags); /* Implied MB */ + if (!hpsa_is_cmd_idle(c)) + atomic_inc(&dev->reset_cmds_out); + else + c->reset_pending = NULL; + spin_unlock_irqrestore(&h->lock, flags); + } + + cmd_free(h, c); + } + + rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); + if (!rc) + wait_event(h->event_sync_wait_queue, + atomic_read(&dev->reset_cmds_out) == 0 || + lockup_detected(h)); + + if (unlikely(lockup_detected(h))) { + dev_warn(&h->pdev->dev, + "Controller lockup detected during reset wait\n"); + rc = -ENODEV; + } + + if (unlikely(rc)) + atomic_set(&dev->reset_cmds_out, 0); + + mutex_unlock(&h->reset_mutex); return rc; } @@ -1609,7 +2973,7 @@ buf = kzalloc(64, GFP_KERNEL); if (!buf) return; - rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); if (rc == 0) *raid_level = buf[8]; if (*raid_level > RAID_UNKNOWN) @@ -1618,9 +2982,361 @@ return; } +#define HPSA_MAP_DEBUG +#ifdef HPSA_MAP_DEBUG +static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, + struct raid_map_data *map_buff) +{ + struct raid_map_disk_data *dd = &map_buff->data[0]; + int map, row, col; + u16 map_cnt, row_cnt, disks_per_row; + + if (rc != 0) + return; + + /* Show details only if debugging has been activated. */ + if (h->raid_offload_debug < 2) + return; + + dev_info(&h->pdev->dev, "structure_size = %u\n", + le32_to_cpu(map_buff->structure_size)); + dev_info(&h->pdev->dev, "volume_blk_size = %u\n", + le32_to_cpu(map_buff->volume_blk_size)); + dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", + le64_to_cpu(map_buff->volume_blk_cnt)); + dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", + map_buff->phys_blk_shift); + dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", + map_buff->parity_rotation_shift); + dev_info(&h->pdev->dev, "strip_size = %u\n", + le16_to_cpu(map_buff->strip_size)); + dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", + le64_to_cpu(map_buff->disk_starting_blk)); + dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", + le64_to_cpu(map_buff->disk_blk_cnt)); + dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", + le16_to_cpu(map_buff->data_disks_per_row)); + dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", + le16_to_cpu(map_buff->metadata_disks_per_row)); + dev_info(&h->pdev->dev, "row_cnt = %u\n", + le16_to_cpu(map_buff->row_cnt)); + dev_info(&h->pdev->dev, "layout_map_count = %u\n", + le16_to_cpu(map_buff->layout_map_count)); + dev_info(&h->pdev->dev, "flags = 0x%x\n", + le16_to_cpu(map_buff->flags)); + dev_info(&h->pdev->dev, "encrypytion = %s\n", + le16_to_cpu(map_buff->flags) & + RAID_MAP_FLAG_ENCRYPT_ON ? 
"ON" : "OFF"); + dev_info(&h->pdev->dev, "dekindex = %u\n", + le16_to_cpu(map_buff->dekindex)); + map_cnt = le16_to_cpu(map_buff->layout_map_count); + for (map = 0; map < map_cnt; map++) { + dev_info(&h->pdev->dev, "Map%u:\n", map); + row_cnt = le16_to_cpu(map_buff->row_cnt); + for (row = 0; row < row_cnt; row++) { + dev_info(&h->pdev->dev, " Row%u:\n", row); + disks_per_row = + le16_to_cpu(map_buff->data_disks_per_row); + for (col = 0; col < disks_per_row; col++, dd++) + dev_info(&h->pdev->dev, + " D%02u: h=0x%04x xor=%u,%u\n", + col, dd->ioaccel_handle, + dd->xor_mult[0], dd->xor_mult[1]); + disks_per_row = + le16_to_cpu(map_buff->metadata_disks_per_row); + for (col = 0; col < disks_per_row; col++, dd++) + dev_info(&h->pdev->dev, + " M%02u: h=0x%04x xor=%u,%u\n", + col, dd->ioaccel_handle, + dd->xor_mult[0], dd->xor_mult[1]); + } + } +} +#else +static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, + __attribute__((unused)) int rc, + __attribute__((unused)) struct raid_map_data *map_buff) +{ +} +#endif + +static int hpsa_get_raid_map(struct ctlr_info *h, + unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ + int rc = 0; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, + sizeof(this_device->raid_map), 0, + scsi3addr, TYPE_CMD)) { + dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); + cmd_free(h, c); + return -1; + } + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + goto out; + } + cmd_free(h, c); + + /* @todo in the future, dynamically allocate RAID map memory */ + if (le32_to_cpu(this_device->raid_map.structure_size) > + sizeof(this_device->raid_map)) { + dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); + rc = -1; + } + hpsa_debug_map_buff(h, rc, &this_device->raid_map); + return rc; +out: + cmd_free(h, c); + return rc; +} + +static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, + unsigned char scsi3addr[], u16 bmic_device_index, + struct bmic_sense_subsystem_info *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, + 0, RAID_CTLR_LUNID, TYPE_CMD); + if (rc) + goto out; + + c->Request.CDB[2] = bmic_device_index & 0xff; + c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static int hpsa_bmic_id_controller(struct ctlr_info *h, + struct bmic_identify_controller *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + + rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, + 0, RAID_CTLR_LUNID, TYPE_CMD); + if (rc) + goto out; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static int 
hpsa_bmic_id_physical_device(struct ctlr_info *h, + unsigned char scsi3addr[], u16 bmic_device_index, + struct bmic_identify_physical_device *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, + 0, RAID_CTLR_LUNID, TYPE_CMD); + if (rc) + goto out; + + c->Request.CDB[2] = bmic_device_index & 0xff; + c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; + + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, + NO_TIMEOUT); + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + + return rc; +} + +static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, + unsigned char *scsi3addr) +{ + struct ReportExtendedLUNdata *physdev; + u32 nphysicals; + u64 sa = 0; + int i; + + physdev = kzalloc(sizeof(*physdev), GFP_KERNEL); + if (!physdev) + return 0; + + if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { + dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); + kfree(physdev); + return 0; + } + nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24; + + for (i = 0; i < nphysicals; i++) + if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) { + sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]); + break; + } + + kfree(physdev); + + return sa; +} + +static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, + struct hpsa_scsi_dev_t *dev) +{ + int rc; + u64 sa = 0; + + if (is_hba_lunid(scsi3addr)) { + struct bmic_sense_subsystem_info *ssi; + + ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); + if (ssi == NULL) { + dev_warn(&h->pdev->dev, + "%s: out of memory\n", __func__); + return; + } + + rc = hpsa_bmic_sense_subsystem_information(h, + scsi3addr, 0, ssi, sizeof(*ssi)); + if (rc == 0) { + sa = get_unaligned_be64(ssi->primary_world_wide_id); + h->sas_address = sa; + } + + kfree(ssi); + } else + sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr); + + dev->sas_address = sa; +} + +/* Get a device id from inquiry page 0x83 */ +static int hpsa_vpd_page_supported(struct ctlr_info *h, + unsigned char scsi3addr[], u8 page) +{ + int rc; + int i; + int pages; + unsigned char *buf, bufsize; + + buf = kzalloc(256, GFP_KERNEL); + if (!buf) + return 0; + + /* Get the size of the page list first */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, + buf, HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_unsupported; + pages = buf[3]; + if ((pages + HPSA_VPD_HEADER_SZ) <= 255) + bufsize = pages + HPSA_VPD_HEADER_SZ; + else + bufsize = 255; + + /* Get the whole VPD page list */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, + buf, bufsize); + if (rc != 0) + goto exit_unsupported; + + pages = buf[3]; + for (i = 1; i <= pages; i++) + if (buf[3 + i] == page) + goto exit_supported; +exit_unsupported: + kfree(buf); + return 0; +exit_supported: + kfree(buf); + return 1; +} + +static void hpsa_get_ioaccel_status(struct ctlr_info *h, + unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ + int rc; + unsigned char *buf; + u8 ioaccel_status; + + this_device->offload_config = 0; + this_device->offload_enabled = 0; + this_device->offload_to_be_enabled = 0; + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return; + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) + goto out; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + 
VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); + if (rc != 0) + goto out; + +#define IOACCEL_STATUS_BYTE 4 +#define OFFLOAD_CONFIGURED_BIT 0x01 +#define OFFLOAD_ENABLED_BIT 0x02 + ioaccel_status = buf[IOACCEL_STATUS_BYTE]; + this_device->offload_config = + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); + if (this_device->offload_config) { + this_device->offload_enabled = + !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + if (hpsa_get_raid_map(h, scsi3addr, this_device)) + this_device->offload_enabled = 0; + } + this_device->offload_to_be_enabled = this_device->offload_enabled; +out: + kfree(buf); + return; +} + /* Get the device id from inquiry page 0x83 */ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, - unsigned char *device_id, int buflen) + unsigned char *device_id, int index, int buflen) { int rc; unsigned char *buf; @@ -1629,16 +3345,18 @@ buflen = 16; buf = kzalloc(64, GFP_KERNEL); if (!buf) - return -1; - rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); + return -ENOMEM; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); if (rc == 0) - memcpy(device_id, &buf[8], buflen); + memcpy(device_id, &buf[index], buflen); + kfree(buf); + return rc != 0; } static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, - struct ReportLUNdata *buf, int bufsize, + void *buf, int bufsize, int extended_response) { int rc = IO_OK; @@ -1646,11 +3364,8 @@ unsigned char scsi3addr[8]; struct ErrorInfo *ei; - c = cmd_special_alloc(h); - if (c == NULL) { /* trouble... */ - dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); - return -1; - } + c = cmd_alloc(h); + /* address the controller */ memset(scsi3addr, 0, sizeof(scsi3addr)); if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, @@ -1660,23 +3375,36 @@ } if (extended_response) c->Request.CDB[1] = extended_response; - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { - hpsa_scsi_interpret_error(c); + hpsa_scsi_interpret_error(h, c); rc = -1; + } else { + struct ReportLUNdata *rld = buf; + + if (rld->extended_response_flag != extended_response) { + dev_err(&h->pdev->dev, + "report luns requested format %u, got %u\n", + extended_response, + rld->extended_response_flag); + rc = -1; + } } out: - cmd_special_free(h, c); + cmd_free(h, c); return rc; } static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, - struct ReportLUNdata *buf, - int bufsize, int extended_response) + struct ReportExtendedLUNdata *buf, int bufsize) { - return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); + return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, + HPSA_REPORT_PHYS_EXTENDED); } static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, @@ -1693,6 +3421,177 @@ device->lun = lun; } +/* Use VPD inquiry to get details of volume status */ +static int hpsa_get_volume_status(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + int rc; + int status; + int size; + unsigned char *buf; + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return HPSA_VPD_LV_STATUS_UNSUPPORTED; + + /* Does controller have VPD for logical volume status? 
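hpsa_get_ioaccel_status() reads a single status byte out of the vendor VPD page: bit 0 says offload is configured, bit 1 says it is enabled, and "enabled" is only honored when "configured" is also set (and, in the driver, only if the RAID map can then be fetched). A runnable decode of a fabricated page; the page-code byte below is made up:

#include <stdio.h>

#define IOACCEL_STATUS_BYTE    4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT    0x02

/* Decode the ioaccel VPD status byte as the driver does:
 * "enabled" counts only when "configured" is set. */
static void decode_ioaccel(const unsigned char *vpd,
                           int *configured, int *enabled)
{
    unsigned char st = vpd[IOACCEL_STATUS_BYTE];

    *configured = !!(st & OFFLOAD_CONFIGURED_BIT);
    *enabled = *configured ? !!(st & OFFLOAD_ENABLED_BIT) : 0;
}

int main(void)
{
    /* hypothetical page: header bytes, then status byte 0x03 */
    unsigned char vpd[8] = { 0, 0xc2, 0, 4, 0x03 };
    int cfg, en;

    decode_ioaccel(vpd, &cfg, &en);
    printf("configured=%d enabled=%d\n", cfg, en);
    return 0;
}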
*/ + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) + goto exit_failed; + + /* Get the size of the VPD return buffer */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, + buf, HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_failed; + size = buf[3]; + + /* Now get the whole VPD buffer */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, + buf, size + HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_failed; + status = buf[4]; /* status byte */ + + kfree(buf); + return status; +exit_failed: + kfree(buf); + return HPSA_VPD_LV_STATUS_UNSUPPORTED; +} + +/* Determine offline status of a volume. + * Return either: + * 0 (not offline) + * 0xff (offline for unknown reasons) + * # (integer code indicating one of several NOT READY states + * describing why a volume is to be kept offline) + */ +static int hpsa_volume_offline(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + struct CommandList *c; + unsigned char *sense; + u8 sense_key, asc, ascq; + int sense_len; + int rc, ldstat = 0; + u16 cmd_status; + u8 scsi_status; +#define ASC_LUN_NOT_READY 0x04 +#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 +#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 + + c = cmd_alloc(h); + + (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + if (rc) { + cmd_free(h, c); + return 0; + } + sense = c->err_info->SenseInfo; + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); + cmd_status = c->err_info->CommandStatus; + scsi_status = c->err_info->ScsiStatus; + cmd_free(h, c); + /* Is the volume 'not ready'? */ + if (cmd_status != CMD_TARGET_STATUS || + scsi_status != SAM_STAT_CHECK_CONDITION || + sense_key != NOT_READY || + asc != ASC_LUN_NOT_READY) { + return 0; + } + + /* Determine the reason for not ready state */ + ldstat = hpsa_get_volume_status(h, scsi3addr); + + /* Keep volume offline in certain cases: */ + switch (ldstat) { + case HPSA_LV_UNDERGOING_ERASE: + case HPSA_LV_NOT_AVAILABLE: + case HPSA_LV_UNDERGOING_RPI: + case HPSA_LV_PENDING_RPI: + case HPSA_LV_ENCRYPTED_NO_KEY: + case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: + case HPSA_LV_UNDERGOING_ENCRYPTION: + case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: + case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + return ldstat; + case HPSA_VPD_LV_STATUS_UNSUPPORTED: + /* If VPD status page isn't available, + * use ASC/ASCQ to determine state + */ + if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || + (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) + return ldstat; + break; + default: + break; + } + return 0; +} + +/* + * Find out if a logical device supports aborts by simply trying one. + * Smart Array may claim not to support aborts on logical drives, but + * if a MSA2000 * is connected, the drives on that will be presented + * by the Smart Array as logical drives, and aborts may be sent to + * those devices successfully. So the simplest way to find out is + * to simply try an abort and see how the device responds. 
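hpsa_get_volume_status() above shows the common two-step VPD fetch: read just the header to learn the payload length from byte 3, then reissue the inquiry for header plus payload and pick the field out of the full buffer. A self-contained sketch with a fake inquiry standing in for the real round trip; page code and status value are invented:

#include <stdio.h>
#include <string.h>

#define VPD_HEADER_SZ 4

/* Fabricated page: byte 1 = page code, byte 3 = payload length,
 * byte 4 = the status byte we are after. */
static const unsigned char device_page[16] = { 0, 0xc8, 0, 2, 0x05 };

static int fake_inquiry(unsigned char *buf, int len)
{
    if (len > (int)sizeof(device_page))
        return -1;
    memcpy(buf, device_page, len);
    return 0;
}

int main(void)
{
    unsigned char buf[64];
    int size;

    if (fake_inquiry(buf, VPD_HEADER_SZ)) /* step 1: header only */
        return 1;
    size = buf[3];                        /* payload length */
    if (fake_inquiry(buf, size + VPD_HEADER_SZ)) /* step 2: full page */
        return 1;
    printf("volume status byte: %#x\n", buf[4]);
    return 0;
}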
+ */ +static int hpsa_device_supports_aborts(struct ctlr_info *h, + unsigned char *scsi3addr) +{ + struct CommandList *c; + struct ErrorInfo *ei; + int rc = 0; + + u64 tag = (u64) -1; /* bogus tag */ + + /* Assume that physical devices support aborts */ + if (!is_logical_dev_addr_mode(scsi3addr)) + return 1; + + c = cmd_alloc(h); + + (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); + (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + /* no unmap needed here because no data xfer. */ + ei = c->err_info; + switch (ei->CommandStatus) { + case CMD_INVALID: + rc = 0; + break; + case CMD_UNABORTABLE: + case CMD_ABORT_FAILED: + rc = 1; + break; + case CMD_TMF_STATUS: + rc = hpsa_evaluate_tmf_status(h, c); + break; + default: + rc = 0; + break; + } + cmd_free(h, c); + return rc; +} + +static void sanitize_inquiry_string(unsigned char *s, int len) +{ + bool terminated = false; + + for (; len > 0; (--len, ++s)) { + if (*s == 0) + terminated = true; + if (terminated || *s < 0x20 || *s > 0x7e) + *s = ' '; + } +} + static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) @@ -1705,10 +3604,13 @@ unsigned char *inq_buff; unsigned char *obdr_sig; + int rc = 0; inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); - if (!inq_buff) + if (!inq_buff) { + rc = -ENOMEM; goto bail_out; + } /* Do an inquiry to the device to see what it is. */ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, @@ -1716,9 +3618,13 @@ /* Inquiry failed (msg printed already) */ dev_err(&h->pdev->dev, "hpsa_update_device_info: inquiry failed\n"); + rc = -EIO; goto bail_out; } + sanitize_inquiry_string(&inq_buff[8], 8); + sanitize_inquiry_string(&inq_buff[16], 16); + this_device->devtype = (inq_buff[0] & 0x1f); memcpy(this_device->scsi3addr, scsi3addr, 8); memcpy(this_device->vendor, &inq_buff[8], @@ -1727,14 +3633,29 @@ sizeof(this_device->model)); memset(this_device->device_id, 0, sizeof(this_device->device_id)); - hpsa_get_device_id(h, scsi3addr, this_device->device_id, + hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, sizeof(this_device->device_id)); if (this_device->devtype == TYPE_DISK && - is_logical_dev_addr_mode(scsi3addr)) + is_logical_dev_addr_mode(scsi3addr)) { + int volume_offline; + hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); - else + if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) + hpsa_get_ioaccel_status(h, scsi3addr, this_device); + volume_offline = hpsa_volume_offline(h, scsi3addr); + if (volume_offline < 0 || volume_offline > 0xff) + volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; + this_device->volume_offline = volume_offline & 0xff; + } else { this_device->raid_level = RAID_UNKNOWN; + this_device->offload_config = 0; + this_device->offload_enabled = 0; + this_device->offload_to_be_enabled = 0; + this_device->hba_ioaccel_enabled = 0; + this_device->volume_offline = 0; + this_device->queue_depth = h->nr_cmds; + } if (is_OBDR_device) { /* See if this is a One-Button-Disaster-Recovery device @@ -1745,121 +3666,120 @@ strncmp(obdr_sig, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0); } - kfree(inq_buff); return 0; bail_out: kfree(inq_buff); - return 1; + return rc; } -static unsigned char *ext_target_model[] = { - "MSA2012", - "MSA2024", - "MSA2312", - "MSA2324", - "P2000 G3 SAS", - NULL, -}; - -static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) +static void hpsa_update_device_supports_aborts(struct ctlr_info *h, + struct hpsa_scsi_dev_t 
*dev, u8 *scsi3addr) { - int i; - - for (i = 0; ext_target_model[i]; i++) - if (strncmp(device->model, ext_target_model[i], - strlen(ext_target_model[i])) == 0) - return 1; - return 0; + unsigned long flags; + int rc, entry; + /* + * See if this device supports aborts. If we already know + * the device, we already know if it supports aborts, otherwise + * we have to find out if it supports aborts by trying one. + */ + spin_lock_irqsave(&h->devlock, flags); + rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); + if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && + entry >= 0 && entry < h->ndevices) { + dev->supports_aborts = h->dev[entry]->supports_aborts; + spin_unlock_irqrestore(&h->devlock, flags); + } else { + spin_unlock_irqrestore(&h->devlock, flags); + dev->supports_aborts = + hpsa_device_supports_aborts(h, scsi3addr); + if (dev->supports_aborts < 0) + dev->supports_aborts = 0; + } } -/* Helper function to assign bus, target, lun mapping of devices. - * Puts non-external target logical volumes on bus 0, external target logical - * volumes on bus 1, physical devices on bus 2. and the hba on bus 3. +/* + * Helper function to assign bus, target, lun mapping of devices. * Logical drive target and lun are assigned at this time, but * physical device lun and target assignment are deferred (assigned * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) - */ +*/ static void figure_bus_target_lun(struct ctlr_info *h, u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) { - u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); + u32 lunid = get_unaligned_le32(lunaddrbytes); if (!is_logical_dev_addr_mode(lunaddrbytes)) { /* physical device, target and lun filled in later */ if (is_hba_lunid(lunaddrbytes)) - hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff); + hpsa_set_bus_target_lun(device, + HPSA_HBA_BUS, 0, lunid & 0x3fff); else /* defer target, lun assignment for physical devices */ - hpsa_set_bus_target_lun(device, 2, -1, -1); + hpsa_set_bus_target_lun(device, + HPSA_PHYSICAL_DEVICE_BUS, -1, -1); return; } /* It's a logical device */ - if (is_ext_target(h, device)) { - /* external target way, put logicals on bus 1 - * and match target/lun numbers box - * reports, other smart array, bus 0, target 0, match lunid - */ + if (device->external) { hpsa_set_bus_target_lun(device, - 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff); + HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, + lunid & 0x00ff); return; } - hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff); + hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, + 0, lunid & 0x3fff); } + /* - * If there is no lun 0 on a target, linux won't find any devices. - * For the external targets (arrays), we have to manually detect the enclosure - * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report - * it for some reason. *tmpdevice is the target we're adding, - * this_device is a pointer into the current element of currentsd[] - * that we're building up in update_scsi_devices(), below. - * lunzerobits is a bitmap that tracks which targets already have a - * lun 0 assigned. - * Returns 1 if an enclosure was added, 0 if not. - */ -static int add_ext_target_dev(struct ctlr_info *h, - struct hpsa_scsi_dev_t *tmpdevice, - struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, - unsigned long lunzerobits[], int *n_ext_target_devs) + * Get address of physical disk used for an ioaccel2 mode command: + * 1. Extract ioaccel2 handle from the command. + * 2. Find a matching ioaccel2 handle from list of physical disks. + * 3. 
Return: + * 1 and set scsi3addr to address of matching physical + * 0 if no matching physical disk was found. + */ +static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, + struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) { - unsigned char scsi3addr[8]; - - if (test_bit(tmpdevice->target, lunzerobits)) - return 0; /* There is already a lun 0 on this target. */ + struct io_accel2_cmd *c2 = + &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; + unsigned long flags; + int i; - if (!is_logical_dev_addr_mode(lunaddrbytes)) - return 0; /* It's the logical targets that may lack lun 0. */ + spin_lock_irqsave(&h->devlock, flags); + for (i = 0; i < h->ndevices; i++) + if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { + memcpy(scsi3addr, h->dev[i]->scsi3addr, + sizeof(h->dev[i]->scsi3addr)); + spin_unlock_irqrestore(&h->devlock, flags); + return 1; + } + spin_unlock_irqrestore(&h->devlock, flags); + return 0; +} - if (!is_ext_target(h, tmpdevice)) - return 0; /* Only external target devices have this problem. */ +static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, + int i, int nphysicals, int nlocal_logicals) +{ + /* In report logicals, local logicals are listed first, + * then any externals. + */ + int logicals_start = nphysicals + (raid_ctlr_position == 0); - if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */ + if (i == raid_ctlr_position) return 0; - memset(scsi3addr, 0, 8); - scsi3addr[3] = tmpdevice->target; - if (is_hba_lunid(scsi3addr)) - return 0; /* Don't add the RAID controller here. */ - - if (is_scsi_rev_5(h)) - return 0; /* p1210m doesn't need to do this. */ - - if (*n_ext_target_devs >= MAX_EXT_TARGETS) { - dev_warn(&h->pdev->dev, "Maximum number of external " - "target devices exceeded. Check your hardware " - "configuration."); + if (i < logicals_start) return 0; - } - if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) + /* i is in logicals range, but still within local logicals */ + if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals) return 0; - (*n_ext_target_devs)++; - hpsa_set_bus_target_lun(this_device, - tmpdevice->bus, tmpdevice->target, 0); - set_bit(tmpdevice->target, lunzerobits); - return 1; + + return 1; /* it's an external lun */ } /* @@ -1869,22 +3789,20 @@ * Returns 0 on success, -1 otherwise. */ static int hpsa_gather_lun_info(struct ctlr_info *h, - int reportlunsize, - struct ReportLUNdata *physdev, u32 *nphysicals, + struct ReportExtendedLUNdata *physdev, u32 *nphysicals, struct ReportLUNdata *logdev, u32 *nlogicals) { - if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { + if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); return -1; } - *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; + *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; if (*nphysicals > HPSA_MAX_PHYS_LUN) { - dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." - " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, - *nphysicals - HPSA_MAX_PHYS_LUN); + dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. 
%d LUNs ignored.\n", + HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); *nphysicals = HPSA_MAX_PHYS_LUN; } - if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { + if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); return -1; } @@ -1907,8 +3825,9 @@ return 0; } -u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, - int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, +static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, + int i, int nphysicals, int nlogicals, + struct ReportExtendedLUNdata *physdev_list, struct ReportLUNdata *logdev_list) { /* Helper function, figure out where the LUN ID info is coming from @@ -1923,7 +3842,8 @@ return RAID_CTLR_LUNID; if (i < logicals_start) - return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; + return &physdev_list->LUN[i - + (raid_ctlr_position == 0)].lunid[0]; if (i < last_device) return &logdev_list->LUN[i - nphysicals - @@ -1932,7 +3852,150 @@ return NULL; } -static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) +/* get physical drive ioaccel handle and queue depth */ +static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, + struct ReportExtendedLUNdata *rlep, int rle_index, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; + + dev->ioaccel_handle = rle->ioaccel_handle; + if ((rle->device_flags & 0x08) && dev->ioaccel_handle) + dev->hba_ioaccel_enabled = 1; + memset(id_phys, 0, sizeof(*id_phys)); + rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], + GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, + sizeof(*id_phys)); + if (!rc) + /* Reserve space for FW operations */ +#define DRIVE_CMDS_RESERVED_FOR_FW 2 +#define DRIVE_QUEUE_DEPTH 7 + dev->queue_depth = + le16_to_cpu(id_phys->current_queue_depth_limit) - + DRIVE_CMDS_RESERVED_FOR_FW; + else + dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ +} + +static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, + struct ReportExtendedLUNdata *rlep, int rle_index, + struct bmic_identify_physical_device *id_phys) +{ + struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; + + if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) + this_device->hba_ioaccel_enabled = 1; + + memcpy(&this_device->active_path_index, + &id_phys->active_path_number, + sizeof(this_device->active_path_index)); + memcpy(&this_device->path_map, + &id_phys->redundant_path_present_map, + sizeof(this_device->path_map)); + memcpy(&this_device->box, + &id_phys->alternate_paths_phys_box_on_port, + sizeof(this_device->box)); + memcpy(&this_device->phys_connector, + &id_phys->alternate_paths_phys_connector, + sizeof(this_device->phys_connector)); + memcpy(&this_device->bay, + &id_phys->phys_bay_in_box, + sizeof(this_device->bay)); +} + +/* get number of local logical disks. 
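hpsa_get_ioaccel_drive_info() above derives a physical drive's queue depth from the BMIC identify data, holding back two commands for firmware use, and falls back to a small constant when the identify fails. The arithmetic, isolated into a standalone check:

#include <stdio.h>

#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7 /* conservative fallback */

/* Trust the drive's advertised limit minus the firmware reserve,
 * or fall back when BMIC identify did not succeed. */
static int drive_queue_depth(int bmic_ok, int advertised_limit)
{
    if (!bmic_ok)
        return DRIVE_QUEUE_DEPTH;
    return advertised_limit - DRIVE_CMDS_RESERVED_FOR_FW;
}

int main(void)
{
    printf("identified: %d\n", drive_queue_depth(1, 32)); /* -> 30 */
    printf("fallback:   %d\n", drive_queue_depth(0, 0));  /* -> 7 */
    return 0;
}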
*/ +static int hpsa_set_local_logical_count(struct ctlr_info *h, + struct bmic_identify_controller *id_ctlr, + u32 *nlocals) +{ + int rc; + + if (!id_ctlr) { + dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", + __func__); + return -ENOMEM; + } + memset(id_ctlr, 0, sizeof(*id_ctlr)); + rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); + if (!rc) + if (id_ctlr->configured_logical_drive_count < 256) + *nlocals = id_ctlr->configured_logical_drive_count; + else + *nlocals = le16_to_cpu( + id_ctlr->extended_logical_unit_count); + else + *nlocals = -1; + return rc; +} + +static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) +{ + struct bmic_identify_physical_device *id_phys; + bool is_spare = false; + int rc; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + return false; + + rc = hpsa_bmic_id_physical_device(h, + lunaddrbytes, + GET_BMIC_DRIVE_NUMBER(lunaddrbytes), + id_phys, sizeof(*id_phys)); + if (rc == 0) + is_spare = (id_phys->more_flags >> 6) & 0x01; + + kfree(id_phys); + return is_spare; +} + +#define RPL_DEV_FLAG_NON_DISK 0x1 +#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 +#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 + +#define BMIC_DEVICE_TYPE_ENCLOSURE 6 + +static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, + struct ext_report_lun_entry *rle) +{ + u8 device_flags; + u8 device_type; + + if (!MASKED_DEVICE(lunaddrbytes)) + return false; + + device_flags = rle->device_flags; + device_type = rle->device_type; + + if (device_flags & RPL_DEV_FLAG_NON_DISK) { + if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) + return false; + return true; + } + + if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) + return false; + + if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) + return false; + + /* + * Spares may be spun down, we do not want to + * do an Inquiry to a RAID set spare drive as + * that would have them spun up, that is a + * performance hit because I/O to the RAID device + * stops while the spin up occurs which can take + * over 50 seconds. + */ + if (hpsa_is_disk_spare(h, lunaddrbytes)) + return true; + + return false; +} + +static void hpsa_update_scsi_devices(struct ctlr_info *h) { /* the idea here is we could get notified * that some devices have changed, so we do a report @@ -1944,32 +4007,49 @@ * tell which devices we already know about, vs. new * devices, vs. disappearing devices. 
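 * Roughly: fetch the physical and logical LUN lists, build a fresh
 * currentsd[] array describing what is present now, and let
 * adjust_hpsa_scsi_table() reconcile that with h->dev[].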
*/ - struct ReportLUNdata *physdev_list = NULL; + struct ReportExtendedLUNdata *physdev_list = NULL; struct ReportLUNdata *logdev_list = NULL; + struct bmic_identify_physical_device *id_phys = NULL; + struct bmic_identify_controller *id_ctlr = NULL; u32 nphysicals = 0; u32 nlogicals = 0; + u32 nlocal_logicals = 0; u32 ndev_allocated = 0; struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; int ncurrent = 0; - int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; int i, n_ext_target_devs, ndevs_to_allocate; int raid_ctlr_position; + bool physical_device; DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); - physdev_list = kzalloc(reportlunsize, GFP_KERNEL); - logdev_list = kzalloc(reportlunsize, GFP_KERNEL); + physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); + logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL); - if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { + if (!currentsd || !physdev_list || !logdev_list || + !tmpdevice || !id_phys || !id_ctlr) { dev_err(&h->pdev->dev, "out of memory\n"); goto out; } memset(lunzerobits, 0, sizeof(lunzerobits)); - if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, - logdev_list, &nlogicals)) + h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ + + if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, + logdev_list, &nlogicals)) { + h->drv_req_rescan = 1; goto out; + } + + /* Set number of local logicals (non PTRAID) */ + if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { + dev_warn(&h->pdev->dev, + "%s: Can't determine number of local logical devices.\n", + __func__); + } /* We might see up to the maximum number of logical and physical disks * plus external target devices, and a device for the local RAID @@ -1990,12 +4070,13 @@ if (!currentsd[i]) { dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", __FILE__, __LINE__); + h->drv_req_rescan = 1; goto out; } ndev_allocated++; } - if (unlikely(is_scsi_rev_5(h))) + if (is_scsi_rev_5(h)) raid_ctlr_position = 0; else raid_ctlr_position = nphysicals + nlogicals; @@ -2004,37 +4085,80 @@ n_ext_target_devs = 0; for (i = 0; i < nphysicals + nlogicals + 1; i++) { u8 *lunaddrbytes, is_OBDR = 0; + int rc = 0; + int phys_dev_index = i - (raid_ctlr_position == 0); + bool skip_device = false; + + physical_device = i < nphysicals + (raid_ctlr_position == 0); /* Figure out where the LUN ID info is coming from */ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); - /* skip masked physical devices. */ - if (lunaddrbytes[3] & 0xC0 && - i < nphysicals + (raid_ctlr_position == 0)) - continue; + + /* + * Skip over some devices such as a spare. + */ + if (!tmpdevice->external && physical_device) { + skip_device = hpsa_skip_device(h, lunaddrbytes, + &physdev_list->LUN[phys_dev_index]); + if (skip_device) + continue; + } /* Get device type, vendor, model, device id */ - if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, - &is_OBDR)) - continue; /* skip it if we can't talk to it. 
*/ + rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, + &is_OBDR); + if (rc == -ENOMEM) { + dev_warn(&h->pdev->dev, + "Out of memory, rescan deferred.\n"); + h->drv_req_rescan = 1; + goto out; + } + if (rc) { + dev_warn(&h->pdev->dev, + "Inquiry failed, skipping device.\n"); + continue; + } + + /* Determine if this is a lun from an external target array */ + tmpdevice->external = + figure_external_status(h, raid_ctlr_position, i, + nphysicals, nlocal_logicals); + figure_bus_target_lun(h, lunaddrbytes, tmpdevice); + hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); this_device = currentsd[ncurrent]; - /* - * For external target devices, we have to insert a LUN 0 which - * doesn't show up in CCISS_REPORT_PHYSICAL data, but there - * is nonetheless an enclosure device there. We have to - * present that otherwise linux won't find anything if - * there is no lun 0. + /* Turn on discovery_polling if there are ext target devices. + * Event-based change notification is unreliable for those. */ - if (add_ext_target_dev(h, tmpdevice, this_device, - lunaddrbytes, lunzerobits, - &n_ext_target_devs)) { - ncurrent++; - this_device = currentsd[ncurrent]; + if (!h->discovery_polling) { + if (tmpdevice->external) { + h->discovery_polling = 1; + dev_info(&h->pdev->dev, + "External target, activate discovery polling.\n"); + } } + *this_device = *tmpdevice; + this_device->physical_device = physical_device; + + /* + * Expose all devices except for physical devices that + * are masked. + */ + if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device) + this_device->expose_device = 0; + else + this_device->expose_device = 1; + + + /* + * Get the SAS address for physical devices that are exposed. + */ + if (this_device->physical_device && this_device->expose_device) + hpsa_get_sas_address(h, lunaddrbytes, this_device); switch (this_device->devtype) { case TYPE_ROM: @@ -2049,12 +4173,20 @@ ncurrent++; break; case TYPE_DISK: - if (i < nphysicals) - break; + if (this_device->physical_device) { + /* The disk is in HBA mode. */ + /* Never use RAID mapper in HBA mode. */ + this_device->offload_enabled = 0; + hpsa_get_ioaccel_drive_info(h, this_device, + physdev_list, phys_dev_index, id_phys); + hpsa_get_path_info(this_device, + physdev_list, phys_dev_index, id_phys); + } ncurrent++; break; case TYPE_TAPE: case TYPE_MEDIUM_CHANGER: + case TYPE_ENCLOSURE: ncurrent++; break; case TYPE_RAID: @@ -2073,7 +4205,19 @@ if (ncurrent >= HPSA_MAX_DEVICES) break; } - adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); + + if (h->sas_host == NULL) { + int rc = 0; + + rc = hpsa_add_sas_host(h); + if (rc) { + dev_warn(&h->pdev->dev, + "Could not add sas host %d\n", rc); + goto out; + } + } + + adjust_hpsa_scsi_table(h, currentsd, ncurrent); out: kfree(tmpdevice); for (i = 0; i < ndev_allocated; i++) @@ -2081,9 +4225,23 @@ kfree(currentsd); kfree(physdev_list); kfree(logdev_list); + kfree(id_ctlr); + kfree(id_phys); +} + +static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, + struct scatterlist *sg) +{ + u64 addr64 = (u64) sg_dma_address(sg); + unsigned int len = sg_dma_len(sg); + + desc->Addr = cpu_to_le64(addr64); + desc->Len = cpu_to_le32(len); + desc->Ext = 0; } -/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci +/* + * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci * dma mapping and fills in the scatter gather entries of the * hpsa command, cp. 
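+ * If more than h->max_cmd_sg_entries descriptors are needed, the tail
+ * of the list is placed in a separate chained block and the header
+ * advertises SGTotal = use_sg + 1 to cover the extra chain entry.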
*/ @@ -2091,10 +4249,8 @@ struct CommandList *cp, struct scsi_cmnd *cmd) { - unsigned int len; struct scatterlist *sg; - u64 addr64; - int use_sg, i, sg_index, chained; + int use_sg, i, sg_limit, chained, last_sg; struct SGDescriptor *curr_sg; BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); @@ -2106,31 +4262,46 @@ if (!use_sg) goto sglist_finished; + /* + * If the number of entries is greater than the max for a single list, + * then we have a chained list; we will set up all but one entry in the + * first list (the last entry is saved for link information); + * otherwise, we don't have a chained list and we'll set up at each of + * the entries in the one list. + */ curr_sg = cp->SG; - chained = 0; - sg_index = 0; - scsi_for_each_sg(cmd, sg, use_sg, i) { - if (i == h->max_cmd_sg_entries - 1 && - use_sg > h->max_cmd_sg_entries) { - chained = 1; - curr_sg = h->cmd_sg_list[cp->cmdindex]; - sg_index = 0; - } - addr64 = (u64) sg_dma_address(sg); - len = sg_dma_len(sg); - curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); - curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); - curr_sg->Len = len; - curr_sg->Ext = 0; /* we are not chaining */ + chained = use_sg > h->max_cmd_sg_entries; + sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; + last_sg = scsi_sg_count(cmd) - 1; + scsi_for_each_sg(cmd, sg, sg_limit, i) { + hpsa_set_sg_descriptor(curr_sg, sg); curr_sg++; } + if (chained) { + /* + * Continue with the chained list. Set curr_sg to the chained + * list. Modify the limit to the total count less the entries + * we've already set up. Resume the scan at the list entry + * where the previous loop left off. + */ + curr_sg = h->cmd_sg_list[cp->cmdindex]; + sg_limit = use_sg - sg_limit; + for_each_sg(sg, sg, sg_limit, i) { + hpsa_set_sg_descriptor(curr_sg, sg); + curr_sg++; + } + } + + /* Back the pointer up to the last entry and mark it as "last". */ + (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); + if (use_sg + chained > h->maxSG) h->maxSG = use_sg + chained; if (chained) { cp->Header.SGList = h->max_cmd_sg_entries; - cp->Header.SGTotal = (u16) (use_sg + 1); + cp->Header.SGTotal = cpu_to_le16(use_sg + 1); if (hpsa_map_sg_chain_block(h, cp)) { scsi_dma_unmap(cmd); return -1; @@ -2141,76 +4312,737 @@ sglist_finished: cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ - cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ + cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ return 0; } +#define IO_ACCEL_INELIGIBLE (1) +static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) +{ + int is_write = 0; + u32 block; + u32 block_cnt; + + /* Perform some CDB fixups if needed using 10 byte reads/writes only */ + switch (cdb[0]) { + case WRITE_6: + case WRITE_12: + is_write = 1; + case READ_6: + case READ_12: + if (*cdb_len == 6) { + block = get_unaligned_be16(&cdb[2]); + block_cnt = cdb[4]; + if (block_cnt == 0) + block_cnt = 256; + } else { + BUG_ON(*cdb_len != 12); + block = get_unaligned_be32(&cdb[2]); + block_cnt = get_unaligned_be32(&cdb[6]); + } + if (block_cnt > 0xffff) + return IO_ACCEL_INELIGIBLE; + + cdb[0] = is_write ? 
WRITE_10 : READ_10; + cdb[1] = 0; + cdb[2] = (u8) (block >> 24); + cdb[3] = (u8) (block >> 16); + cdb[4] = (u8) (block >> 8); + cdb[5] = (u8) (block); + cdb[6] = 0; + cdb[7] = (u8) (block_cnt >> 8); + cdb[8] = (u8) (block_cnt); + cdb[9] = 0; + *cdb_len = 10; + break; + } + return 0; +} -static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) { - struct ctlr_info *h; - struct hpsa_scsi_dev_t *dev; - unsigned char scsi3addr[8]; - struct CommandList *c; - unsigned long flags; + struct scsi_cmnd *cmd = c->scsi_cmd; + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + unsigned int len; + unsigned int total_len = 0; + struct scatterlist *sg; + u64 addr64; + int use_sg, i; + struct SGDescriptor *curr_sg; + u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; - /* Get the ptr to our adapter structure out of cmd->host. */ - h = sdev_to_hba(cmd->device); - dev = cmd->device->hostdata; - if (!dev) { - cmd->result = DID_NO_CONNECT << 16; - done(cmd); - return 0; + /* TODO: implement chaining support */ + if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; } - memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); - cmd->result = DID_ERROR << 16; - done(cmd); - return 0; + BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); + + if (fixup_ioaccel_cdb(cdb, &cdb_len)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; } - spin_unlock_irqrestore(&h->lock, flags); - c = cmd_alloc(h); - if (c == NULL) { /* trouble... 
*/ - dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return SCSI_MLQUEUE_HOST_BUSY; + + c->cmd_type = CMD_IOACCEL1; + + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + + (c->cmdindex * sizeof(*cp)); + BUG_ON(c->busaddr & 0x0000007F); + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return use_sg; } - /* Fill in the command list header */ + if (use_sg) { + curr_sg = cp->SG; + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (u64) sg_dma_address(sg); + len = sg_dma_len(sg); + total_len += len; + curr_sg->Addr = cpu_to_le64(addr64); + curr_sg->Len = cpu_to_le32(len); + curr_sg->Ext = cpu_to_le32(0); + curr_sg++; + } + (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); + + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + control |= IOACCEL1_CONTROL_DATA_OUT; + break; + case DMA_FROM_DEVICE: + control |= IOACCEL1_CONTROL_DATA_IN; + break; + case DMA_NONE: + control |= IOACCEL1_CONTROL_NODATAXFER; + break; + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + } else { + control |= IOACCEL1_CONTROL_NODATAXFER; + } - cmd->scsi_done = done; /* save this for use by completion code */ + c->Header.SGList = use_sg; + /* Fill out the command structure to submit */ + cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); + cp->transfer_len = cpu_to_le32(total_len); + cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | + (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); + cp->control = cpu_to_le32(control); + memcpy(cp->CDB, cdb, cdb_len); + memcpy(cp->CISS_LUN, scsi3addr, 8); + /* Tag was already set at init time. */ + enqueue_cmd_and_start_io(h, c); + return 0; +} - /* save c in case we have to abort it */ - cmd->host_scribble = (unsigned char *) c; +/* + * Queue a command directly to a device behind the controller using the + * I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, + struct CommandList *c) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + + c->phys_disk = dev; + + return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, + cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); +} + +/* + * Set encryption parameters for the ioaccel2 request + */ +static void set_encrypt_ioaccel2(struct ctlr_info *h, + struct CommandList *c, struct io_accel2_cmd *cp) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + struct raid_map_data *map = &dev->raid_map; + u64 first_block; + + /* Are we doing encryption on this device */ + if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) + return; + /* Set the data encryption key index. */ + cp->dekindex = map->dekindex; + + /* Set the encryption enable flag, encoded into direction field. */ + cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; + + /* Set encryption tweak values based on logical block address + * If block size is 512, tweak value is LBA. + * For other block sizes, tweak is (LBA * block size)/ 512) + */ + switch (cmd->cmnd[0]) { + /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ + case WRITE_6: + case READ_6: + first_block = get_unaligned_be16(&cmd->cmnd[2]); + break; + case WRITE_10: + case READ_10: + /* Required? 
12-byte cdbs eliminated by fixup_ioaccel_cdb */ + case WRITE_12: + case READ_12: + first_block = get_unaligned_be32(&cmd->cmnd[2]); + break; + case WRITE_16: + case READ_16: + first_block = get_unaligned_be64(&cmd->cmnd[2]); + break; + default: + dev_err(&h->pdev->dev, + "ERROR: %s: size (0x%x) not supported for encryption\n", + __func__, cmd->cmnd[0]); + BUG(); + break; + } + + if (le32_to_cpu(map->volume_blk_size) != 512) + first_block = first_block * + le32_to_cpu(map->volume_blk_size)/512; + + cp->tweak_lower = cpu_to_le32(first_block); + cp->tweak_upper = cpu_to_le32(first_block >> 32); +} + +static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct ioaccel2_sg_element *curr_sg; + int use_sg, i; + struct scatterlist *sg; + u64 addr64; + u32 len; + u32 total_len = 0; + + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); + + if (fixup_ioaccel_cdb(cdb, &cdb_len)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + + c->cmd_type = CMD_IOACCEL2; + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + + (c->cmdindex * sizeof(*cp)); + BUG_ON(c->busaddr & 0x0000007F); + + memset(cp, 0, sizeof(*cp)); + cp->IU_type = IOACCEL2_IU_TYPE; + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return use_sg; + } + + if (use_sg) { + curr_sg = cp->sg; + if (use_sg > h->ioaccel_maxsg) { + addr64 = le64_to_cpu( + h->ioaccel2_cmd_sg_list[c->cmdindex]->address); + curr_sg->address = cpu_to_le64(addr64); + curr_sg->length = 0; + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = 0x80; + + curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; + } + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (u64) sg_dma_address(sg); + len = sg_dma_len(sg); + total_len += len; + curr_sg->address = cpu_to_le64(addr64); + curr_sg->length = cpu_to_le32(len); + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = 0; + curr_sg++; + } + + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_DATA_OUT; + break; + case DMA_FROM_DEVICE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_DATA_IN; + break; + case DMA_NONE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_NO_DATA; + break; + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + } else { + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_NO_DATA; + } + + /* Set encryption parameters, if necessary */ + set_encrypt_ioaccel2(h, c, cp); + + cp->scsi_nexus = cpu_to_le32(ioaccel_handle); + cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); + memcpy(cp->cdb, cdb, sizeof(cp->cdb)); + + cp->data_len = cpu_to_le32(total_len); + cp->err_ptr = cpu_to_le64(c->busaddr + + offsetof(struct io_accel2_cmd, error_data)); + cp->err_len = cpu_to_le32(sizeof(cp->error_data)); + + /* fill in sg elements */ + if (use_sg > h->ioaccel_maxsg) { + cp->sg_count = 1; + cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0])); + if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { + 
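+ /*
+  * Mapping the chain block failed: drop the per-disk slot taken in
+  * hpsa_scsi_ioaccel_queue_command() and undo the data mapping before
+  * reporting failure.
+  */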
atomic_dec(&phys_disk->ioaccel_cmds_out); + scsi_dma_unmap(cmd); + return -1; + } + } else + cp->sg_count = (u8) use_sg; + + enqueue_cmd_and_start_io(h, c); + return 0; +} + +/* + * Queue a command to the correct I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) +{ + /* Try to honor the device's queue depth */ + if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > + phys_disk->queue_depth) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + return IO_ACCEL_INELIGIBLE; + } + if (h->transMethod & CFGTBL_Trans_io_accel1) + return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, + cdb, cdb_len, scsi3addr, + phys_disk); + else + return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, + cdb, cdb_len, scsi3addr, + phys_disk); +} + +static void raid_map_helper(struct raid_map_data *map, + int offload_to_mirror, u32 *map_index, u32 *current_group) +{ + if (offload_to_mirror == 0) { + /* use physical disk in the first mirrored group. */ + *map_index %= le16_to_cpu(map->data_disks_per_row); + return; + } + do { + /* determine mirror group that *map_index indicates */ + *current_group = *map_index / + le16_to_cpu(map->data_disks_per_row); + if (offload_to_mirror == *current_group) + continue; + if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { + /* select map index from next group */ + *map_index += le16_to_cpu(map->data_disks_per_row); + (*current_group)++; + } else { + /* select map index from first group */ + *map_index %= le16_to_cpu(map->data_disks_per_row); + *current_group = 0; + } + } while (offload_to_mirror != *current_group); +} + +/* + * Attempt to perform offload RAID mapping for a logical volume I/O. 
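+ * Only requests that land in a single row and column of the RAID map
+ * (i.e. on one physical disk) are eligible; anything else returns
+ * IO_ACCEL_INELIGIBLE and goes down the normal RAID path. A rough
+ * worked example, with illustrative numbers only: strip_size = 256 and
+ * three data disks per row give blocks_per_row = 768, so a read of
+ * blocks 100..163 stays in row 0, column 0 and can be remapped.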
+ */ +static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, + struct CommandList *c) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + struct raid_map_data *map = &dev->raid_map; + struct raid_map_disk_data *dd = &map->data[0]; + int is_write = 0; + u32 map_index; + u64 first_block, last_block; + u32 block_cnt; + u32 blocks_per_row; + u64 first_row, last_row; + u32 first_row_offset, last_row_offset; + u32 first_column, last_column; + u64 r0_first_row, r0_last_row; + u32 r5or6_blocks_per_row; + u64 r5or6_first_row, r5or6_last_row; + u32 r5or6_first_row_offset, r5or6_last_row_offset; + u32 r5or6_first_column, r5or6_last_column; + u32 total_disks_per_row; + u32 stripesize; + u32 first_group, last_group, current_group; + u32 map_row; + u32 disk_handle; + u64 disk_block; + u32 disk_block_cnt; + u8 cdb[16]; + u8 cdb_len; + u16 strip_size; +#if BITS_PER_LONG == 32 + u64 tmpdiv; +#endif + int offload_to_mirror; + + /* check for valid opcode, get LBA and block count */ + switch (cmd->cmnd[0]) { + case WRITE_6: + is_write = 1; + case READ_6: + first_block = get_unaligned_be16(&cmd->cmnd[2]); + block_cnt = cmd->cmnd[4]; + if (block_cnt == 0) + block_cnt = 256; + break; + case WRITE_10: + is_write = 1; + case READ_10: + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + block_cnt = + (((u32) cmd->cmnd[7]) << 8) | + cmd->cmnd[8]; + break; + case WRITE_12: + is_write = 1; + case READ_12: + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + block_cnt = + (((u32) cmd->cmnd[6]) << 24) | + (((u32) cmd->cmnd[7]) << 16) | + (((u32) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + break; + case WRITE_16: + is_write = 1; + case READ_16: + first_block = + (((u64) cmd->cmnd[2]) << 56) | + (((u64) cmd->cmnd[3]) << 48) | + (((u64) cmd->cmnd[4]) << 40) | + (((u64) cmd->cmnd[5]) << 32) | + (((u64) cmd->cmnd[6]) << 24) | + (((u64) cmd->cmnd[7]) << 16) | + (((u64) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + block_cnt = + (((u32) cmd->cmnd[10]) << 24) | + (((u32) cmd->cmnd[11]) << 16) | + (((u32) cmd->cmnd[12]) << 8) | + cmd->cmnd[13]; + break; + default: + return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ + } + last_block = first_block + block_cnt - 1; + + /* check for write to non-RAID-0 */ + if (is_write && dev->raid_level != 0) + return IO_ACCEL_INELIGIBLE; + + /* check for invalid block or wraparound */ + if (last_block >= le64_to_cpu(map->volume_blk_cnt) || + last_block < first_block) + return IO_ACCEL_INELIGIBLE; + + /* calculate stripe information for the request */ + blocks_per_row = le16_to_cpu(map->data_disks_per_row) * + le16_to_cpu(map->strip_size); + strip_size = le16_to_cpu(map->strip_size); +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + (void) do_div(tmpdiv, blocks_per_row); + first_row = tmpdiv; + tmpdiv = last_block; + (void) do_div(tmpdiv, blocks_per_row); + last_row = tmpdiv; + first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); + last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); + tmpdiv = first_row_offset; + (void) do_div(tmpdiv, strip_size); + first_column = tmpdiv; + tmpdiv = last_row_offset; + (void) do_div(tmpdiv, strip_size); + last_column = tmpdiv; +#else + first_row = first_block / blocks_per_row; + last_row = last_block / blocks_per_row; + first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); + last_row_offset = (u32) 
(last_block - (last_row * blocks_per_row)); + first_column = first_row_offset / strip_size; + last_column = last_row_offset / strip_size; +#endif + + /* if this isn't a single row/column then give to the controller */ + if ((first_row != last_row) || (first_column != last_column)) + return IO_ACCEL_INELIGIBLE; + + /* proceeding with driver mapping */ + total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + + le16_to_cpu(map->metadata_disks_per_row); + map_row = ((u32)(first_row >> map->parity_rotation_shift)) % + le16_to_cpu(map->row_cnt); + map_index = (map_row * total_disks_per_row) + first_column; + + switch (dev->raid_level) { + case HPSA_RAID_0: + break; /* nothing special to do */ + case HPSA_RAID_1: + /* Handles load balance across RAID 1 members. + * (2-drive R1 and R10 with even # of drives.) + * Appropriate for SSDs, not optimal for HDDs + */ + BUG_ON(le16_to_cpu(map->layout_map_count) != 2); + if (dev->offload_to_mirror) + map_index += le16_to_cpu(map->data_disks_per_row); + dev->offload_to_mirror = !dev->offload_to_mirror; + break; + case HPSA_RAID_ADM: + /* Handles N-way mirrors (R1-ADM) + * and R10 with # of drives divisible by 3.) + */ + BUG_ON(le16_to_cpu(map->layout_map_count) != 3); + + offload_to_mirror = dev->offload_to_mirror; + raid_map_helper(map, offload_to_mirror, + &map_index, ¤t_group); + /* set mirror group to use next time */ + offload_to_mirror = + (offload_to_mirror >= + le16_to_cpu(map->layout_map_count) - 1) + ? 0 : offload_to_mirror + 1; + dev->offload_to_mirror = offload_to_mirror; + /* Avoid direct use of dev->offload_to_mirror within this + * function since multiple threads might simultaneously + * increment it beyond the range of dev->layout_map_count -1. + */ + break; + case HPSA_RAID_5: + case HPSA_RAID_6: + if (le16_to_cpu(map->layout_map_count) <= 1) + break; + + /* Verify first and last block are in same RAID group */ + r5or6_blocks_per_row = + le16_to_cpu(map->strip_size) * + le16_to_cpu(map->data_disks_per_row); + BUG_ON(r5or6_blocks_per_row == 0); + stripesize = r5or6_blocks_per_row * + le16_to_cpu(map->layout_map_count); +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + first_group = do_div(tmpdiv, stripesize); + tmpdiv = first_group; + (void) do_div(tmpdiv, r5or6_blocks_per_row); + first_group = tmpdiv; + tmpdiv = last_block; + last_group = do_div(tmpdiv, stripesize); + tmpdiv = last_group; + (void) do_div(tmpdiv, r5or6_blocks_per_row); + last_group = tmpdiv; +#else + first_group = (first_block % stripesize) / r5or6_blocks_per_row; + last_group = (last_block % stripesize) / r5or6_blocks_per_row; +#endif + if (first_group != last_group) + return IO_ACCEL_INELIGIBLE; + + /* Verify request is in a single row of RAID 5/6 */ +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + (void) do_div(tmpdiv, stripesize); + first_row = r5or6_first_row = r0_first_row = tmpdiv; + tmpdiv = last_block; + (void) do_div(tmpdiv, stripesize); + r5or6_last_row = r0_last_row = tmpdiv; +#else + first_row = r5or6_first_row = r0_first_row = + first_block / stripesize; + r5or6_last_row = r0_last_row = last_block / stripesize; +#endif + if (r5or6_first_row != r5or6_last_row) + return IO_ACCEL_INELIGIBLE; + + + /* Verify request is in a single column */ +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + first_row_offset = do_div(tmpdiv, stripesize); + tmpdiv = first_row_offset; + first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); + r5or6_first_row_offset = first_row_offset; + tmpdiv = last_block; + r5or6_last_row_offset = do_div(tmpdiv, stripesize); + 
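+ /*
+  * do_div() leaves the quotient in its first argument and returns the
+  * remainder, so each modulo-then-divide pair is split into two steps
+  * in this 32-bit branch.
+  */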
tmpdiv = r5or6_last_row_offset; + r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); + tmpdiv = r5or6_first_row_offset; + (void) do_div(tmpdiv, map->strip_size); + first_column = r5or6_first_column = tmpdiv; + tmpdiv = r5or6_last_row_offset; + (void) do_div(tmpdiv, map->strip_size); + r5or6_last_column = tmpdiv; +#else + first_row_offset = r5or6_first_row_offset = + (u32)((first_block % stripesize) % + r5or6_blocks_per_row); + + r5or6_last_row_offset = + (u32)((last_block % stripesize) % + r5or6_blocks_per_row); + + first_column = r5or6_first_column = + r5or6_first_row_offset / le16_to_cpu(map->strip_size); + r5or6_last_column = + r5or6_last_row_offset / le16_to_cpu(map->strip_size); +#endif + if (r5or6_first_column != r5or6_last_column) + return IO_ACCEL_INELIGIBLE; + /* Request is eligible */ + map_row = ((u32)(first_row >> map->parity_rotation_shift)) % + le16_to_cpu(map->row_cnt); + + map_index = (first_group * + (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + + (map_row * total_disks_per_row) + first_column; + break; + default: + return IO_ACCEL_INELIGIBLE; + } + + if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) + return IO_ACCEL_INELIGIBLE; + + c->phys_disk = dev->phys_disk[map_index]; + + disk_handle = dd[map_index].ioaccel_handle; + disk_block = le64_to_cpu(map->disk_starting_blk) + + first_row * le16_to_cpu(map->strip_size) + + (first_row_offset - first_column * + le16_to_cpu(map->strip_size)); + disk_block_cnt = block_cnt; + + /* handle differing logical/physical block sizes */ + if (map->phys_blk_shift) { + disk_block <<= map->phys_blk_shift; + disk_block_cnt <<= map->phys_blk_shift; + } + BUG_ON(disk_block_cnt > 0xffff); + + /* build the new CDB for the physical disk I/O */ + if (disk_block > 0xffffffff) { + cdb[0] = is_write ? WRITE_16 : READ_16; + cdb[1] = 0; + cdb[2] = (u8) (disk_block >> 56); + cdb[3] = (u8) (disk_block >> 48); + cdb[4] = (u8) (disk_block >> 40); + cdb[5] = (u8) (disk_block >> 32); + cdb[6] = (u8) (disk_block >> 24); + cdb[7] = (u8) (disk_block >> 16); + cdb[8] = (u8) (disk_block >> 8); + cdb[9] = (u8) (disk_block); + cdb[10] = (u8) (disk_block_cnt >> 24); + cdb[11] = (u8) (disk_block_cnt >> 16); + cdb[12] = (u8) (disk_block_cnt >> 8); + cdb[13] = (u8) (disk_block_cnt); + cdb[14] = 0; + cdb[15] = 0; + cdb_len = 16; + } else { + cdb[0] = is_write ? WRITE_10 : READ_10; + cdb[1] = 0; + cdb[2] = (u8) (disk_block >> 24); + cdb[3] = (u8) (disk_block >> 16); + cdb[4] = (u8) (disk_block >> 8); + cdb[5] = (u8) (disk_block); + cdb[6] = 0; + cdb[7] = (u8) (disk_block_cnt >> 8); + cdb[8] = (u8) (disk_block_cnt); + cdb[9] = 0; + cdb_len = 10; + } + return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, + dev->scsi3addr, + dev->phys_disk[map_index]); +} + +/* + * Submit commands down the "normal" RAID stack path + * All callers to hpsa_ciss_submit must check lockup_detected + * beforehand, before (opt.) and after calling cmd_alloc + */ +static int hpsa_ciss_submit(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + unsigned char scsi3addr[]) +{ + cmd->host_scribble = (unsigned char *) c; c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; c->Header.ReplyQueue = 0; /* unused in simple mode */ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); - c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); - c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; + c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); /* Fill in the request block... 
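 * the CDB itself plus a single type/attribute/direction word built by
 * TYPE_ATTR_DIR(), which replaces the old separate Type fields.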
*/ c->Request.Timeout = 0; - memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); c->Request.CDBLen = cmd->cmd_len; memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); - c->Request.Type.Type = TYPE_CMD; - c->Request.Type.Attribute = ATTR_SIMPLE; switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: - c->Request.Type.Direction = XFER_WRITE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); break; case DMA_FROM_DEVICE: - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); break; case DMA_NONE: - c->Request.Type.Direction = XFER_NONE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); break; case DMA_BIDIRECTIONAL: /* This can happen if a buggy application does a scsi passthru @@ -2218,7 +5050,8 @@ * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) */ - c->Request.Type.Direction = XFER_RSVD; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); /* This is technically wrong, and hpsa controllers should * reject it with CMD_INVALID, which is the most correct * response, but non-fibre backends appear to let it @@ -2237,7 +5070,7 @@ } if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ - cmd_free(h, c); + hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } enqueue_cmd_and_start_io(h, c); @@ -2245,13 +5078,203 @@ return 0; } -static DEF_SCSI_QCMD(hpsa_scsi_queue_command) +static void hpsa_cmd_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle, err_dma_handle; + + /* Zero out all of commandlist except the last field, refcount */ + memset(c, 0, offsetof(struct CommandList, refcount)); + c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); + cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + c->err_info = h->errinfo_pool + index; + memset(c->err_info, 0, sizeof(*c->err_info)); + err_dma_handle = h->errinfo_pool_dhandle + + index * sizeof(*c->err_info); + c->cmdindex = index; + c->busaddr = (u32) cmd_dma_handle; + c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); + c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); + c->h = h; + c->scsi_cmd = SCSI_CMD_IDLE; +} + +static void hpsa_preinitialize_commands(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + + hpsa_cmd_init(h, i, c); + atomic_set(&c->refcount, 0); + } +} + +static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + + BUG_ON(c->cmdindex != index); + + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); + memset(c->err_info, 0, sizeof(*c->err_info)); + c->busaddr = (u32) cmd_dma_handle; +} + +static int hpsa_ioaccel_submit(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + unsigned char *scsi3addr) +{ + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + int rc = IO_ACCEL_INELIGIBLE; + + cmd->host_scribble = (unsigned char *) c; + + if (dev->offload_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + rc = hpsa_scsi_ioaccel_raid_map(h, c); + if (rc < 0) /* scsi_dma_map failed. */ + rc = SCSI_MLQUEUE_HOST_BUSY; + } else if (dev->hba_ioaccel_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + rc = hpsa_scsi_ioaccel_direct_map(h, c); + if (rc < 0) /* scsi_dma_map failed. 
*/ + rc = SCSI_MLQUEUE_HOST_BUSY; + } + return rc; +} + +static void hpsa_command_resubmit_worker(struct work_struct *work) +{ + struct scsi_cmnd *cmd; + struct hpsa_scsi_dev_t *dev; + struct CommandList *c = container_of(work, struct CommandList, work); + + cmd = c->scsi_cmd; + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(c->h, c, cmd); + } + if (c->reset_pending) + return hpsa_cmd_resolve_and_free(c->h, c); + if (c->abort_pending) + return hpsa_cmd_abort_and_free(c->h, c, cmd); + if (c->cmd_type == CMD_IOACCEL2) { + struct ctlr_info *h = c->h; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + int rc; + + if (c2->error_data.serv_response == + IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { + rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); + if (rc == 0) + return; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + /* + * If we get here, it means dma mapping failed. + * Try again via scsi mid layer, which will + * then get SCSI_MLQUEUE_HOST_BUSY. + */ + cmd->result = DID_IMM_RETRY << 16; + return hpsa_cmd_free_and_done(h, c, cmd); + } + /* else, fall thru and resubmit down CISS path */ + } + } + hpsa_cmd_partial_init(c->h, c->cmdindex, c); + if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { + /* + * If we get here, it means dma mapping failed. Try + * again via scsi mid layer, which will then get + * SCSI_MLQUEUE_HOST_BUSY. + * + * hpsa_ciss_submit will have already freed c + * if it encountered a dma mapping failure. + */ + cmd->result = DID_IMM_RETRY << 16; + cmd->scsi_done(cmd); + } +} + +/* Running in struct Scsi_Host->host_lock less mode */ +static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) +{ + struct ctlr_info *h; + struct hpsa_scsi_dev_t *dev; + unsigned char scsi3addr[8]; + struct CommandList *c; + int rc = 0; + + /* Get the ptr to our adapter structure out of cmd->host. */ + h = sdev_to_hba(cmd->device); + + BUG_ON(cmd->request->tag < 0); + + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + return 0; + } + + memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); + + if (unlikely(lockup_detected(h))) { + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + return 0; + } + c = cmd_tagged_alloc(h, cmd); + + /* + * Call alternate submit routine for I/O accelerated commands. + * Retries always go down the normal I/O path. + */ + if (likely(cmd->retries == 0 && + cmd->request->cmd_type == REQ_TYPE_FS && + h->acciopath_status)) { + rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); + if (rc == 0) + return 0; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + return hpsa_ciss_submit(h, c, cmd, scsi3addr); +} + +static void hpsa_scan_complete(struct ctlr_info *h) +{ + unsigned long flags; + + spin_lock_irqsave(&h->scan_lock, flags); + h->scan_finished = 1; + wake_up_all(&h->scan_wait_queue); + spin_unlock_irqrestore(&h->scan_lock, flags); +} static void hpsa_scan_start(struct Scsi_Host *sh) { struct ctlr_info *h = shost_to_hba(sh); unsigned long flags; + /* + * Don't let rescans be initiated on a controller known to be locked + * up. If the controller locks up *during* a rescan, that thread is + * probably hosed, but at least we can prevent new rescan threads from + * piling up on a locked up controller. + */ + if (unlikely(lockup_detected(h))) + return hpsa_scan_complete(h); + /* wait until any scan already in progress is finished. 
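 * Only one rescan runs at a time; later callers sleep on
 * h->scan_wait_queue until hpsa_scan_complete() sets scan_finished.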
*/ while (1) { spin_lock_irqsave(&h->scan_lock, flags); @@ -2268,12 +5291,27 @@ h->scan_finished = 0; /* mark scan as in progress */ spin_unlock_irqrestore(&h->scan_lock, flags); - hpsa_update_scsi_devices(h, h->scsi_host->host_no); + if (unlikely(lockup_detected(h))) + return hpsa_scan_complete(h); - spin_lock_irqsave(&h->scan_lock, flags); - h->scan_finished = 1; /* mark scan as finished. */ - wake_up_all(&h->scan_wait_queue); - spin_unlock_irqrestore(&h->scan_lock, flags); + hpsa_update_scsi_devices(h); + + hpsa_scan_complete(h); +} + +static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; + + if (!logical_drive) + return -ENODEV; + + if (qdepth < 1) + qdepth = 1; + else if (qdepth > logical_drive->queue_depth) + qdepth = logical_drive->queue_depth; + + return scsi_change_queue_depth(sdev, qdepth); } static int hpsa_scan_finished(struct Scsi_Host *sh, @@ -2289,39 +5327,15 @@ return finished; } -static int hpsa_change_queue_depth(struct scsi_device *sdev, - int qdepth, int reason) -{ - struct ctlr_info *h = sdev_to_hba(sdev); - - if (reason != SCSI_QDEPTH_DEFAULT) - return -ENOTSUPP; - - if (qdepth < 1) - qdepth = 1; - else - if (qdepth > h->nr_cmds) - qdepth = h->nr_cmds; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - return sdev->queue_depth; -} - -static void hpsa_unregister_scsi(struct ctlr_info *h) -{ - /* we are being forcibly unloaded, and may not refuse. */ - scsi_remove_host(h->scsi_host); - scsi_host_put(h->scsi_host); - h->scsi_host = NULL; -} - -static int hpsa_register_scsi(struct ctlr_info *h) +static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - int error; sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); - if (sh == NULL) - goto fail; + if (sh == NULL) { + dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); + return -ENOMEM; + } sh->io_port = 0; sh->n_io_port = 0; @@ -2330,76 +5344,150 @@ sh->max_cmd_len = MAX_COMMAND_SIZE; sh->max_lun = HPSA_MAX_LUN; sh->max_id = HPSA_MAX_LUN; - sh->can_queue = h->nr_cmds; - sh->cmd_per_lun = h->nr_cmds; + sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; + sh->cmd_per_lun = sh->can_queue; sh->sg_tablesize = h->maxsgentries; - h->scsi_host = sh; + sh->transportt = hpsa_sas_transport_template; sh->hostdata[0] = (unsigned long) h; sh->irq = h->intr[h->intr_mode]; sh->unique_id = sh->irq; - error = scsi_add_host(sh, &h->pdev->dev); - if (error) - goto fail_host_put; - scsi_scan_host(sh); + + h->scsi_host = sh; return 0; +} - fail_host_put: - dev_err(&h->pdev->dev, "%s: scsi_add_host" - " failed for controller %d\n", __func__, h->ctlr); - scsi_host_put(sh); - return error; - fail: - dev_err(&h->pdev->dev, "%s: scsi_host_alloc" - " failed for controller %d\n", __func__, h->ctlr); - return -ENOMEM; +static int hpsa_scsi_add_host(struct ctlr_info *h) +{ + int rv; + + rv = scsi_add_host(h->scsi_host, &h->pdev->dev); + if (rv) { + dev_err(&h->pdev->dev, "scsi_add_host failed\n"); + return rv; + } + scsi_scan_host(h->scsi_host); + return 0; } -static int wait_for_device_to_become_ready(struct ctlr_info *h, - unsigned char lunaddr[]) +/* + * The block layer has already gone to the trouble of picking out a unique, + * small-integer tag for this request. We use an offset from that value as + * an index to select our command block. (The offset allows us to reserve the + * low-numbered entries for our own uses.) 
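+ * So block-layer tag 0 maps to command index HPSA_NRESERVED_CMDS,
+ * tag 1 to HPSA_NRESERVED_CMDS + 1, and so on.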
+ */ +static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) { - int rc = 0; + int idx = scmd->request->tag; + + if (idx < 0) + return idx; + + /* Offset to leave space for internal cmds. */ + return idx += HPSA_NRESERVED_CMDS; +} + +/* + * Send a TEST_UNIT_READY command to the specified LUN using the specified + * reply queue; returns zero if the unit is ready, and non-zero otherwise. + */ +static int hpsa_send_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, unsigned char lunaddr[], + int reply_queue) +{ + int rc; + + /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ + (void) fill_cmd(c, TEST_UNIT_READY, h, + NULL, 0, 0, lunaddr, TYPE_CMD); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) + return rc; + /* no unmap needed here because no data xfer. */ + + /* Check if the unit is already ready. */ + if (c->err_info->CommandStatus == CMD_SUCCESS) + return 0; + + /* + * The first command sent after reset will receive "unit attention" to + * indicate that the LUN has been reset...this is actually what we're + * looking for (but, success is good too). + */ + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && + c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && + (c->err_info->SenseInfo[2] == NO_SENSE || + c->err_info->SenseInfo[2] == UNIT_ATTENTION)) + return 0; + + return 1; +} + +/* + * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; + * returns zero when the unit is ready, and non-zero when giving up. + */ +static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, + unsigned char lunaddr[], int reply_queue) +{ + int rc; int count = 0; int waittime = 1; /* seconds */ - struct CommandList *c; - - c = cmd_special_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "out of memory in " - "wait_for_device_to_become_ready.\n"); - return IO_ERROR; - } /* Send test unit ready until device ready, or give up. */ - while (count < HPSA_TUR_RETRY_LIMIT) { + for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { - /* Wait for a bit. do this first, because if we send + /* + * Wait for a bit. do this first, because if we send * the TUR right away, the reset will just abort it. */ msleep(1000 * waittime); - count++; + + rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); + if (!rc) + break; /* Increase wait time with each try, up to a point. */ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) - waittime = waittime * 2; + waittime *= 2; - /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ - (void) fill_cmd(c, TEST_UNIT_READY, h, - NULL, 0, 0, lunaddr, TYPE_CMD); - hpsa_scsi_do_simple_cmd_core(h, c); - /* no unmap needed here because no data xfer. */ + dev_warn(&h->pdev->dev, + "waiting %d secs for device to become ready.\n", + waittime); + } - if (c->err_info->CommandStatus == CMD_SUCCESS) - break; + return rc; +} - if (c->err_info->CommandStatus == CMD_TARGET_STATUS && - c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && - (c->err_info->SenseInfo[2] == NO_SENSE || - c->err_info->SenseInfo[2] == UNIT_ATTENTION)) - break; +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[], + int reply_queue) +{ + int first_queue; + int last_queue; + int rq; + int rc = 0; + struct CommandList *c; + + c = cmd_alloc(h); + + /* + * If no specific reply queue was requested, then send the TUR + * repeatedly, requesting a reply on each reply queue; otherwise execute + * the loop exactly once using only the specified queue. 
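+ * E.g. a caller passing DEFAULT_REPLY_QUEUE ends up sending the TUR on
+ * reply queues 0 through h->nreply_queues - 1, one at a time.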
+ */ + if (reply_queue == DEFAULT_REPLY_QUEUE) { + first_queue = 0; + last_queue = h->nreply_queues - 1; + } else { + first_queue = reply_queue; + last_queue = reply_queue; + } - dev_warn(&h->pdev->dev, "waiting %d secs " - "for device to become ready.\n", waittime); - rc = 1; /* device not ready. */ + for (rq = first_queue; rq <= last_queue; rq++) { + rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); + if (rc) + break; } if (rc) @@ -2407,7 +5495,7 @@ else dev_warn(&h->pdev->dev, "device is ready.\n"); - cmd_special_free(h, c); + cmd_free(h, c); return rc; } @@ -2419,26 +5507,65 @@ int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; + u8 reset_type; + char msg[48]; /* find the controller to which the command to be aborted was sent */ h = sdev_to_hba(scsicmd->device); if (h == NULL) /* paranoia */ return FAILED; + + if (lockup_detected(h)) + return FAILED; + dev = scsicmd->device->hostdata; if (!dev) { - dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " - "device lookup failed.\n"); + dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); return FAILED; } - dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun); - /* send a reset to the SCSI LUN which the command was sent to */ - rc = hpsa_send_reset(h, dev->scsi3addr); - if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) + + /* if controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + snprintf(msg, sizeof(msg), + "cmd %d RESET FAILED, lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return FAILED; + } + + /* this reset request might be the result of a lockup; check */ + if (detect_controller_lockup(h)) { + snprintf(msg, sizeof(msg), + "cmd %d RESET FAILED, new lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return FAILED; + } + + /* Do not attempt on controller */ + if (is_hba_lunid(dev->scsi3addr)) return SUCCESS; - dev_warn(&h->pdev->dev, "resetting device failed.\n"); - return FAILED; + if (is_logical_dev_addr_mode(dev->scsi3addr)) + reset_type = HPSA_DEVICE_RESET_MSG; + else + reset_type = HPSA_PHYS_TARGET_RESET; + + sprintf(msg, "resetting %s", + reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + + h->reset_in_progress = 1; + + /* send a reset to the SCSI LUN which the command was sent to */ + rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, + DEFAULT_REPLY_QUEUE); + sprintf(msg, "reset %s %s", + reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", + rc == 0 ? "completed successfully" : "failed"); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + h->reset_in_progress = 0; + return rc == 0 ? 
SUCCESS : FAILED; } static void swizzle_abort_tag(u8 *tag) @@ -2456,134 +5583,283 @@ tag[7] = original_tag[4]; } +static void hpsa_get_tag(struct ctlr_info *h, + struct CommandList *c, __le32 *taglower, __le32 *tagupper) +{ + u64 tag; + if (c->cmd_type == CMD_IOACCEL1) { + struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) + &h->ioaccel_cmd_pool[c->cmdindex]; + tag = le64_to_cpu(cm1->tag); + *tagupper = cpu_to_le32(tag >> 32); + *taglower = cpu_to_le32(tag); + return; + } + if (c->cmd_type == CMD_IOACCEL2) { + struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) + &h->ioaccel2_cmd_pool[c->cmdindex]; + /* upper tag not used in ioaccel2 mode */ + memset(tagupper, 0, sizeof(*tagupper)); + *taglower = cm2->Tag; + return; + } + tag = le64_to_cpu(c->Header.tag); + *tagupper = cpu_to_le32(tag >> 32); + *taglower = cpu_to_le32(tag); +} + static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, - struct CommandList *abort, int swizzle) + struct CommandList *abort, int reply_queue) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; + __le32 tagupper, taglower; - c = cmd_special_alloc(h); - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); - return -ENOMEM; - } + c = cmd_alloc(h); /* fill_cmd can't fail here, no buffer to map */ - (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, + (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, 0, 0, scsi3addr, TYPE_MSG); - if (swizzle) + if (h->needs_abort_tags_swizzled) swizzle_abort_tag(&c->Request.CDB[4]); - hpsa_scsi_do_simple_cmd_core(h, c); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", - __func__, abort->Header.Tag.upper, abort->Header.Tag.lower); + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + hpsa_get_tag(h, abort, &taglower, &tagupper); + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", + __func__, tagupper, taglower); /* no unmap needed here because no data xfer. */ ei = c->err_info; switch (ei->CommandStatus) { case CMD_SUCCESS: break; + case CMD_TMF_STATUS: + rc = hpsa_evaluate_tmf_status(h, c); + break; case CMD_UNABORTABLE: /* Very common, don't make noise. */ rc = -1; break; default: dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", - __func__, abort->Header.Tag.upper, - abort->Header.Tag.lower); - hpsa_scsi_interpret_error(c); + __func__, tagupper, taglower); + hpsa_scsi_interpret_error(h, c); rc = -1; break; } - cmd_special_free(h, c); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, - abort->Header.Tag.upper, abort->Header.Tag.lower); + cmd_free(h, c); + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", + __func__, tagupper, taglower); return rc; } -/* - * hpsa_find_cmd_in_queue - * - * Used to determine whether a command (find) is still present - * in queue_head. Optionally excludes the last element of queue_head. - * - * This is used to avoid unnecessary aborts. Commands in h->reqQ have - * not yet been submitted, and so can be aborted by the driver without - * sending an abort to the hardware. - * - * Returns pointer to command if found in queue, NULL otherwise. 
+static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, + struct CommandList *command_to_abort, int reply_queue) +{ + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + struct io_accel2_cmd *c2a = + &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; + struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; + struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; + + /* + * We're overlaying struct hpsa_tmf_struct on top of something which + * was allocated as a struct io_accel2_cmd, so we better be sure it + * actually fits, and doesn't overrun the error info space. + */ + BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > + sizeof(struct io_accel2_cmd)); + BUG_ON(offsetof(struct io_accel2_cmd, error_data) < + offsetof(struct hpsa_tmf_struct, error_len) + + sizeof(ac->error_len)); + + c->cmd_type = IOACCEL2_TMF; + c->scsi_cmd = SCSI_CMD_BUSY; + + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + + (c->cmdindex * sizeof(struct io_accel2_cmd)); + BUG_ON(c->busaddr & 0x0000007F); + + memset(ac, 0, sizeof(*c2)); /* yes this is correct */ + ac->iu_type = IOACCEL2_IU_TMF_TYPE; + ac->reply_queue = reply_queue; + ac->tmf = IOACCEL2_TMF_ABORT; + ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); + memset(ac->lun_id, 0, sizeof(ac->lun_id)); + ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); + ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); + ac->error_ptr = cpu_to_le64(c->busaddr + + offsetof(struct io_accel2_cmd, error_data)); + ac->error_len = cpu_to_le32(sizeof(c2->error_data)); +} + +/* ioaccel2 path firmware cannot handle abort task requests. + * Change abort requests to physical target reset, and send to the + * address of the physical disk used for the ioaccel 2 command. + * Return 0 on success (IO_OK) + * -1 on failure */ -static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, - struct scsi_cmnd *find, struct list_head *queue_head) + +static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, + unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) { - unsigned long flags; - struct CommandList *c = NULL; /* ptr into cmpQ */ + int rc = IO_OK; + struct scsi_cmnd *scmd; /* scsi command within request being aborted */ + struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ + unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ + unsigned char *psa = &phys_scsi3addr[0]; + + /* Get a pointer to the hpsa logical device. 
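+ * The aborted command is addressed by its logical volume, but the
+ * reset below must go to the physical disk that serviced the ioaccel2
+ * request; hpsa_get_pdisk_of_ioaccel2() recovers that address.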
*/ + scmd = abort->scsi_cmd; + dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); + if (dev == NULL) { + dev_warn(&h->pdev->dev, + "Cannot abort: no device pointer for command.\n"); + return -1; /* not abortable */ + } - if (!find) - return 0; - spin_lock_irqsave(&h->lock, flags); - list_for_each_entry(c, queue_head, list) { - if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ - continue; - if (c->scsi_cmd == find) { - spin_unlock_irqrestore(&h->lock, flags); - return c; - } + if (h->raid_offload_debug > 0) + dev_info(&h->pdev->dev, + "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + "Reset as abort", + scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], + scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); + + if (!dev->offload_enabled) { + dev_warn(&h->pdev->dev, + "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); + return -1; /* not abortable */ } - spin_unlock_irqrestore(&h->lock, flags); - return NULL; + + /* Incoming scsi3addr is logical addr. We need physical disk addr. */ + if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { + dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); + return -1; /* not abortable */ + } + + /* send the reset */ + if (h->raid_offload_debug > 0) + dev_info(&h->pdev->dev, + "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); + if (rc != 0) { + dev_warn(&h->pdev->dev, + "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + return rc; /* failed to reset */ + } + + /* wait for device to recover */ + if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { + dev_warn(&h->pdev->dev, + "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + return -1; /* failed to recover */ + } + + /* device recovered */ + dev_info(&h->pdev->dev, + "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + + return rc; /* success */ } -static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, - u8 *tag, struct list_head *queue_head) +static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, + struct CommandList *abort, int reply_queue) { - unsigned long flags; + int rc = IO_OK; struct CommandList *c; + __le32 taglower, tagupper; + struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; - spin_lock_irqsave(&h->lock, flags); - list_for_each_entry(c, queue_head, list) { - if (memcmp(&c->Header.Tag, tag, 8) != 0) - continue; - spin_unlock_irqrestore(&h->lock, flags); - return c; + dev = abort->scsi_cmd->device->hostdata; + if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) + return -1; + + c = cmd_alloc(h); + setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); + c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + hpsa_get_tag(h, abort, &taglower, &tagupper); + dev_dbg(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", + __func__, tagupper, taglower); + /* no unmap needed here because no data xfer. 
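+ * The TMF status comes back in the ioaccel2 error_data block rather
+ * than in c->err_info, hence the serv_response switch below.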
*/ + + dev_dbg(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", + __func__, tagupper, taglower, c2->error_data.serv_response); + switch (c2->error_data.serv_response) { + case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: + case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: + rc = 0; + break; + case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: + case IOACCEL2_SERV_RESPONSE_FAILURE: + case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: + rc = -1; + break; + default: + dev_warn(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", + __func__, tagupper, taglower, + c2->error_data.serv_response); + rc = -1; } - spin_unlock_irqrestore(&h->lock, flags); - return NULL; + cmd_free(h, c); + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, + tagupper, taglower); + return rc; } -/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to - * tell which kind we're dealing with, so we send the abort both ways. There - * shouldn't be any collisions between swizzled and unswizzled tags due to the - * way we construct our tags but we check anyway in case the assumptions which - * make this true someday become false. - */ static int hpsa_send_abort_both_ways(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort) + unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) { - u8 swizzled_tag[8]; - struct CommandList *c; - int rc = 0, rc2 = 0; + /* + * ioaccelerator mode 2 commands should be aborted via the + * accelerated path, since RAID path is unaware of these commands, + * but not all underlying firmware can handle abort TMF. + * Change abort to physical device reset when abort TMF is unsupported. + */ + if (abort->cmd_type == CMD_IOACCEL2) { + if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) + return hpsa_send_abort_ioaccel2(h, abort, + reply_queue); + else + return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, + abort, reply_queue); + } + return hpsa_send_abort(h, scsi3addr, abort, reply_queue); +} - /* we do not expect to find the swizzled tag in our queue, but - * check anyway just to be sure the assumptions which make this - * the case haven't become wrong. - */ - memcpy(swizzled_tag, &abort->Request.CDB[4], 8); - swizzle_abort_tag(swizzled_tag); - c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); - if (c != NULL) { - dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); - return hpsa_send_abort(h, scsi3addr, abort, 0); - } - rc = hpsa_send_abort(h, scsi3addr, abort, 0); - - /* if the command is still in our queue, we can't conclude that it was - * aborted (it might have just completed normally) but in any case - * we don't need to try to abort it another way. 
- */ - c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); - if (c) - rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); - return rc && rc2; +/* Find out which reply queue a command was meant to return on */ +static int hpsa_extract_reply_queue(struct ctlr_info *h, + struct CommandList *c) +{ + if (c->cmd_type == CMD_IOACCEL2) + return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; + return c->Header.ReplyQueue; +} + +/* + * Limit concurrency of abort commands to prevent + * over-subscription of commands + */ +static inline int wait_for_available_abort_cmd(struct ctlr_info *h) +{ +#define ABORT_CMD_WAIT_MSECS 5000 + return !wait_event_timeout(h->abort_cmd_wait_queue, + atomic_dec_if_positive(&h->abort_cmds_available) >= 0, + msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); } /* Send an abort for the specified command. @@ -2593,30 +5869,26 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) { - int i, rc; + int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; struct CommandList *abort; /* pointer to command to be aborted */ - struct CommandList *found; struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ char msg[256]; /* For debug messaging. */ int ml = 0; + __le32 tagupper, taglower; + int refcount, reply_queue; - /* Find the controller of the command to be aborted */ - h = sdev_to_hba(sc->device); - if (WARN(h == NULL, - "ABORT REQUEST FAILED, Controller lookup failed.\n")) + if (sc == NULL) return FAILED; - /* Check that controller supports some kind of task abort */ - if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && - !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + if (sc->device == NULL) return FAILED; - memset(msg, 0, sizeof(msg)); - ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ", - h->scsi_host->host_no, sc->device->channel, - sc->device->id, sc->device->lun); + /* Find the controller of the command to be aborted */ + h = sdev_to_hba(sc->device); + if (h == NULL) + return FAILED; /* Find the device of the command to be aborted */ dev = sc->device->hostdata; @@ -2626,193 +5898,231 @@ return FAILED; } + /* If controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "ABORT FAILED, lockup detected"); + return FAILED; + } + + /* This is a good time to check if controller lockup has occurred */ + if (detect_controller_lockup(h)) { + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "ABORT FAILED, new lockup detected"); + return FAILED; + } + + /* Check that controller supports some kind of task abort */ + if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && + !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + return FAILED; + + memset(msg, 0, sizeof(msg)); + ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", + h->scsi_host->host_no, sc->device->channel, + sc->device->id, sc->device->lun, + "Aborting command", sc); + /* Get SCSI command to be aborted */ abort = (struct CommandList *) sc->host_scribble; if (abort == NULL) { - dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", - msg); - return FAILED; + /* This can happen if the command already completed. 
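
/*
 * Editorial sketch, not part of the patch: wait_for_available_abort_cmd()
 * above uses an atomic counter plus a wait queue as a semaphore with a
 * timeout, so abort requests cannot oversubscribe the command pool.
 * 'avail' and 'waitq' stand in for the driver's abort_cmds_available and
 * abort_cmd_wait_queue fields.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

#define SLOT_WAIT_MS 5000

static int take_slot(atomic_t *avail, wait_queue_head_t *waitq)
{
	/* wait_event_timeout() returns 0 if the timeout elapsed. */
	if (!wait_event_timeout(*waitq,
				atomic_dec_if_positive(avail) >= 0,
				msecs_to_jiffies(SLOT_WAIT_MS)))
		return -EBUSY;	/* no slot freed up in time */
	return 0;
}

static void put_slot(atomic_t *avail, wait_queue_head_t *waitq)
{
	atomic_inc(avail);
	wake_up_all(waitq);	/* let a blocked waiter retry the decrement */
}
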
*/ + return SUCCESS; } - - ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", - abort->Header.Tag.upper, abort->Header.Tag.lower); - as = (struct scsi_cmnd *) abort->scsi_cmd; - if (as != NULL) - ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", - as->cmnd[0], as->serial_number); - dev_dbg(&h->pdev->dev, "%s\n", msg); - dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun); - - /* Search reqQ to See if command is queued but not submitted, - * if so, complete the command with aborted status and remove - * it from the reqQ. - */ - found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); - if (found) { - found->err_info->CommandStatus = CMD_ABORTED; - finish_cmd(found); - dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", - msg); + refcount = atomic_inc_return(&abort->refcount); + if (refcount == 1) { /* Command is done already. */ + cmd_free(h, abort); return SUCCESS; } - /* not in reqQ, if also not in cmpQ, must have already completed */ - found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); - if (!found) { - dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", - msg); + /* Don't bother trying the abort if we know it won't work. */ + if (abort->cmd_type != CMD_IOACCEL2 && + abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { + cmd_free(h, abort); + return FAILED; + } + + /* + * Check that we're aborting the right command. + * It's possible the CommandList already completed and got re-used. + */ + if (abort->scsi_cmd != sc) { + cmd_free(h, abort); return SUCCESS; } + abort->abort_pending = true; + hpsa_get_tag(h, abort, &taglower, &tagupper); + reply_queue = hpsa_extract_reply_queue(h, abort); + ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); + as = abort->scsi_cmd; + if (as != NULL) + ml += sprintf(msg+ml, + "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", + as->cmd_len, as->cmnd[0], as->cmnd[1], + as->serial_number); + dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); + hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); + /* * Command is in flight, or possibly already completed * by the firmware (but not to the scsi mid layer) but we can't * distinguish which. Send the abort down. */ - rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); - if (rc != 0) { - dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); - dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", - h->scsi_host->host_no, - dev->bus, dev->target, dev->lun); + if (wait_for_available_abort_cmd(h)) { + dev_warn(&h->pdev->dev, + "%s FAILED, timeout waiting for an abort command to become available.\n", + msg); + cmd_free(h, abort); return FAILED; } - dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); - - /* If the abort(s) above completed and actually aborted the - * command, then the command to be aborted should already be - * completed. If not, wait around a bit more to see if they - * manage to complete normally. - */ -#define ABORT_COMPLETE_WAIT_SECS 30 - for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { - found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); - if (!found) - return SUCCESS; - msleep(100); + rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue); + atomic_inc(&h->abort_cmds_available); + wake_up_all(&h->abort_cmd_wait_queue); + if (rc != 0) { + dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "FAILED to abort command"); + cmd_free(h, abort); + return FAILED; } - dev_warn(&h->pdev->dev, "%s FAILED. 
Aborted command has not completed after %d seconds.\n", - msg, ABORT_COMPLETE_WAIT_SECS); - return FAILED; + dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); + wait_event(h->event_sync_wait_queue, + abort->scsi_cmd != sc || lockup_detected(h)); + cmd_free(h, abort); + return !lockup_detected(h) ? SUCCESS : FAILED; } - /* - * For operations that cannot sleep, a command block is allocated at init, - * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track - * which ones are free or in use. Lock must be held when calling this. - * cmd_free() is the complement. + * For operations with an associated SCSI command, a command block is allocated + * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the + * block request tag as an index into a table of entries. cmd_tagged_free() is + * the complement, although cmd_free() may be called instead. */ -static struct CommandList *cmd_alloc(struct ctlr_info *h) +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd) { - struct CommandList *c; - int i; - union u64bit temp64; - dma_addr_t cmd_dma_handle, err_dma_handle; - unsigned long flags; - - spin_lock_irqsave(&h->lock, flags); - do { - i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); - if (i == h->nr_cmds) { - spin_unlock_irqrestore(&h->lock, flags); - return NULL; - } - } while (test_and_set_bit - (i & (BITS_PER_LONG - 1), - h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); - h->nr_allocs++; - spin_unlock_irqrestore(&h->lock, flags); - - c = h->cmd_pool + i; - memset(c, 0, sizeof(*c)); - cmd_dma_handle = h->cmd_pool_dhandle - + i * sizeof(*c); - c->err_info = h->errinfo_pool + i; - memset(c->err_info, 0, sizeof(*c->err_info)); - err_dma_handle = h->errinfo_pool_dhandle - + i * sizeof(*c->err_info); + int idx = hpsa_get_cmd_index(scmd); + struct CommandList *c = h->cmd_pool + idx; - c->cmdindex = i; + if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { + dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", + idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); + /* The index value comes from the block layer, so if it's out of + * bounds, it's probably not our bug. + */ + BUG(); + } - INIT_LIST_HEAD(&c->list); - c->busaddr = (u32) cmd_dma_handle; - temp64.val = (u64) err_dma_handle; - c->ErrDesc.Addr.lower = temp64.val32.lower; - c->ErrDesc.Addr.upper = temp64.val32.upper; - c->ErrDesc.Len = sizeof(*c->err_info); + atomic_inc(&c->refcount); + if (unlikely(!hpsa_is_cmd_idle(c))) { + /* + * We expect that the SCSI layer will hand us a unique tag + * value. Thus, there should never be a collision here between + * two requests...because if the selected command isn't idle + * then someone is going to be very disappointed. + */ + dev_err(&h->pdev->dev, + "tag collision (tag=%d) in cmd_tagged_alloc().\n", + idx); + if (c->scsi_cmd != NULL) + scsi_print_command(c->scsi_cmd); + scsi_print_command(scmd); + } - c->h = h; + hpsa_cmd_partial_init(h, idx, c); return c; } -/* For operations that can wait for kmalloc to possibly sleep, - * this routine can be called. Lock need not be held to call - * cmd_special_alloc. cmd_special_free() is the complement. - */ -static struct CommandList *cmd_special_alloc(struct ctlr_info *h) +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) { - struct CommandList *c; - union u64bit temp64; - dma_addr_t cmd_dma_handle, err_dma_handle; + /* + * Release our reference to the block. We don't need to do anything + * else to free it, because it is accessed by index. 
(There's no point + * in checking the result of the decrement, since we cannot guarantee + * that there isn't a concurrent abort which is also accessing it.) + */ + (void)atomic_dec(&c->refcount); +} - c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); - if (c == NULL) - return NULL; - memset(c, 0, sizeof(*c)); +/* + * For operations that cannot sleep, a command block is allocated at init, + * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track + * which ones are free or in use. Lock must be held when calling this. + * cmd_free() is the complement. + * This function never gives up and never returns NULL. If it hangs, + * another thread must call cmd_free() to free some tags. + */ - c->cmdindex = -1; +static struct CommandList *cmd_alloc(struct ctlr_info *h) +{ + struct CommandList *c; + int refcount, i; + int offset = 0; - c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), - &err_dma_handle); + /* + * There is some *extremely* small but non-zero chance that + * multiple threads could get in here, and one thread could + * be scanning through the list of bits looking for a free + * one, but the free ones are always behind him, and other + * threads sneak in behind him and eat them before he can + * get to them, so that while there is always a free one, a + * very unlucky thread might be starved anyway, never able to + * beat the other threads. In reality, this happens so + * infrequently as to be indistinguishable from never. + * + * Note that we start allocating commands before the SCSI host structure + * is initialized. Since the search starts at bit zero, this + * all works, since we have at least one command structure available; + * however, it means that the structures with the low indexes have to be + * reserved for driver-initiated requests, while requests from the block + * layer will use the higher indexes. + */ - if (c->err_info == NULL) { - pci_free_consistent(h->pdev, - sizeof(*c), c, cmd_dma_handle); - return NULL; + for (;;) { + i = find_next_zero_bit(h->cmd_pool_bits, + HPSA_NRESERVED_CMDS, + offset); + if (unlikely(i >= HPSA_NRESERVED_CMDS)) { + offset = 0; + continue; + } + c = h->cmd_pool + i; + refcount = atomic_inc_return(&c->refcount); + if (unlikely(refcount > 1)) { + cmd_free(h, c); /* already in use */ + offset = (i + 1) % HPSA_NRESERVED_CMDS; + continue; + } + set_bit(i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)); + break; /* it's ours now. */ } - memset(c->err_info, 0, sizeof(*c->err_info)); - - INIT_LIST_HEAD(&c->list); - c->busaddr = (u32) cmd_dma_handle; - temp64.val = (u64) err_dma_handle; - c->ErrDesc.Addr.lower = temp64.val32.lower; - c->ErrDesc.Addr.upper = temp64.val32.upper; - c->ErrDesc.Len = sizeof(*c->err_info); - - c->h = h; + hpsa_cmd_partial_init(h, i, c); return c; } +/* + * This is the complementary operation to cmd_alloc(). Note, however, in some + * corner cases it may also be used to free blocks allocated by + * cmd_tagged_alloc() in which case the ref-count decrement does the trick and + * the clear-bit is harmless. 
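
/*
 * Editorial sketch, not part of the patch: the lock-free reserved-slot
 * allocator that cmd_alloc() above implements. Scan a bitmap for a free
 * index, claim the slot by being the first to raise its refcount, and
 * retry past slots lost to a race. Like cmd_alloc(), it spins until a
 * slot frees up rather than returning NULL. NSLOTS and struct slot are
 * hypothetical; the real code pairs this with hpsa_cmd_partial_init().
 */
#include <linux/atomic.h>
#include <linux/bitops.h>

#define NSLOTS 16

struct slot {
	atomic_t refcount;
	/* ... per-command payload ... */
};

static struct slot *slot_alloc(struct slot *pool, unsigned long *bits)
{
	int i, offset = 0;

	for (;;) {
		i = find_next_zero_bit(bits, NSLOTS, offset);
		if (i >= NSLOTS) {
			offset = 0;	/* wrapped: rescan from the start */
			continue;
		}
		if (atomic_inc_return(&pool[i].refcount) == 1) {
			set_bit(i, bits);	/* first reference: ours */
			return &pool[i];
		}
		atomic_dec(&pool[i].refcount);	/* raced: undo and move on */
		offset = (i + 1) % NSLOTS;
	}
}
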
+ */ static void cmd_free(struct ctlr_info *h, struct CommandList *c) { - int i; - unsigned long flags; - - i = c - h->cmd_pool; - spin_lock_irqsave(&h->lock, flags); - clear_bit(i & (BITS_PER_LONG - 1), - h->cmd_pool_bits + (i / BITS_PER_LONG)); - h->nr_frees++; - spin_unlock_irqrestore(&h->lock, flags); -} - -static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) -{ - union u64bit temp64; + if (atomic_dec_and_test(&c->refcount)) { + int i; - temp64.val32.lower = c->ErrDesc.Addr.lower; - temp64.val32.upper = c->ErrDesc.Addr.upper; - pci_free_consistent(h->pdev, sizeof(*c->err_info), - c->err_info, (dma_addr_t) temp64.val); - pci_free_consistent(h->pdev, sizeof(*c), - c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); + i = c - h->cmd_pool; + clear_bit(i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)); + } } #ifdef CONFIG_COMPAT -static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) +static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, + void __user *arg) { IOCTL32_Command_struct __user *arg32 = (IOCTL32_Command_struct __user *) arg; @@ -2837,7 +6147,7 @@ if (err) return -EFAULT; - err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); + err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, @@ -2848,7 +6158,7 @@ } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, - int cmd, void *arg) + int cmd, void __user *arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; @@ -2875,7 +6185,7 @@ if (err) return -EFAULT; - err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); + err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, @@ -2885,7 +6195,7 @@ return err; } -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) +static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) { switch (cmd) { case CCISS_GETPCIINFO: @@ -2959,7 +6269,7 @@ IOCTL_Command_struct iocommand; struct CommandList *c; char *buff = NULL; - union u64bit temp64; + u64 temp64; int rc = 0; if (!argp) @@ -2975,8 +6285,8 @@ if (iocommand.buf_size > 0) { buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) - return -EFAULT; - if (iocommand.Request.Type.Direction == XFER_WRITE) { + return -ENOMEM; + if (iocommand.Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { @@ -2987,25 +6297,21 @@ memset(buff, 0, iocommand.buf_size); } } - c = cmd_special_alloc(h); - if (c == NULL) { - rc = -ENOMEM; - goto out_kfree; - } + c = cmd_alloc(h); + /* Fill in the command type */ c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand.buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; - c->Header.SGTotal = 1; + c->Header.SGTotal = cpu_to_le16(1); } else { /* no buffers to fill */ c->Header.SGList = 0; - c->Header.SGTotal = 0; + c->Header.SGTotal = cpu_to_le16(0); } memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); - /* use the kernel address the cmd block for tag */ - c->Header.Tag.lower = c->busaddr; /* Fill in Request block */ memcpy(&c->Request, &iocommand.Request, @@ -3013,24 +6319,26 @@ /* Fill in the scatter gather information */ if (iocommand.buf_size > 0) { - temp64.val = pci_map_single(h->pdev, buff, + temp64 = 
pci_map_single(h->pdev, buff, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(&h->pdev->dev, temp64.val)) { - c->SG[0].Addr.lower = 0; - c->SG[0].Addr.upper = 0; - c->SG[0].Len = 0; + if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { + c->SG[0].Addr = cpu_to_le64(0); + c->SG[0].Len = cpu_to_le32(0); rc = -ENOMEM; goto out; } - c->SG[0].Addr.lower = temp64.val32.lower; - c->SG[0].Addr.upper = temp64.val32.upper; - c->SG[0].Len = iocommand.buf_size; - c->SG[0].Ext = 0; /* we are not chaining*/ + c->SG[0].Addr = cpu_to_le64(temp64); + c->SG[0].Len = cpu_to_le32(iocommand.buf_size); + c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } - hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (iocommand.buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); + if (rc) { + rc = -EIO; + goto out; + } /* Copy the error information out */ memcpy(&iocommand.error_info, c->err_info, @@ -3039,7 +6347,7 @@ rc = -EFAULT; goto out; } - if (iocommand.Request.Type.Direction == XFER_READ && + if ((iocommand.Request.Type.Direction & XFER_READ) && iocommand.buf_size > 0) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { @@ -3048,7 +6356,7 @@ } } out: - cmd_special_free(h, c); + cmd_free(h, c); out_kfree: kfree(buff); return rc; @@ -3060,10 +6368,9 @@ struct CommandList *c; unsigned char **buff = NULL; int *buff_size = NULL; - union u64bit temp64; + u64 temp64; BYTE sg_used = 0; int status = 0; - int i; u32 left; u32 sz; BYTE __user *data_ptr; @@ -3116,7 +6423,7 @@ status = -ENOMEM; goto cleanup1; } - if (ioc->Request.Type.Direction == XFER_WRITE) { + if (ioc->Request.Type.Direction & XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -EFAULT; goto cleanup1; @@ -3127,65 +6434,70 @@ data_ptr += sz; sg_used++; } - c = cmd_special_alloc(h); - if (c == NULL) { - status = -ENOMEM; - goto cleanup1; - } + c = cmd_alloc(h); + c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; - c->Header.SGList = c->Header.SGTotal = sg_used; + c->Header.SGList = (u8) sg_used; + c->Header.SGTotal = cpu_to_le16(sg_used); memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); - c->Header.Tag.lower = c->busaddr; memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); if (ioc->buf_size > 0) { int i; for (i = 0; i < sg_used; i++) { - temp64.val = pci_map_single(h->pdev, buff[i], + temp64 = pci_map_single(h->pdev, buff[i], buff_size[i], PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(&h->pdev->dev, temp64.val)) { - c->SG[i].Addr.lower = 0; - c->SG[i].Addr.upper = 0; - c->SG[i].Len = 0; + if (dma_mapping_error(&h->pdev->dev, + (dma_addr_t) temp64)) { + c->SG[i].Addr = cpu_to_le64(0); + c->SG[i].Len = cpu_to_le32(0); hpsa_pci_unmap(h->pdev, c, i, PCI_DMA_BIDIRECTIONAL); status = -ENOMEM; - goto cleanup1; + goto cleanup0; } - c->SG[i].Addr.lower = temp64.val32.lower; - c->SG[i].Addr.upper = temp64.val32.upper; - c->SG[i].Len = buff_size[i]; - /* we are not chaining */ - c->SG[i].Ext = 0; + c->SG[i].Addr = cpu_to_le64(temp64); + c->SG[i].Len = cpu_to_le32(buff_size[i]); + c->SG[i].Ext = cpu_to_le32(0); } + c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } - hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); + status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 
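
/*
 * Editorial sketch, not part of the patch: the map-then-verify step used
 * for the passthru buffers above, written against the generic DMA API
 * that the older pci_map_single() wraps. 'struct sg_desc' is a
 * hypothetical little-endian SG entry like the controller-visible one
 * the ioctl paths fill in.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

struct sg_desc {
	__le64 addr;
	__le32 len;
};

static int fill_one_sg(struct device *dev, struct sg_desc *sg,
		       void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, handle)) {
		sg->addr = cpu_to_le64(0);	/* leave a harmless null entry */
		sg->len = cpu_to_le32(0);
		return -ENOMEM;
	}
	sg->addr = cpu_to_le64(handle);		/* bus address, LE on the wire */
	sg->len = cpu_to_le32(len);
	return 0;
}
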
check_ioctl_unit_attention(h, c); + if (status) { + status = -EIO; + goto cleanup0; + } + /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); if (copy_to_user(argp, ioc, sizeof(*ioc))) { - cmd_special_free(h, c); status = -EFAULT; - goto cleanup1; + goto cleanup0; } - if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { + if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { + int i; + /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { if (copy_to_user(ptr, buff[i], buff_size[i])) { - cmd_special_free(h, c); status = -EFAULT; - goto cleanup1; + goto cleanup0; } ptr += buff_size[i]; } } - cmd_special_free(h, c); status = 0; +cleanup0: + cmd_free(h, c); cleanup1: if (buff) { + int i; + for (i = 0; i < sg_used; i++) kfree(buff[i]); kfree(buff); @@ -3202,13 +6514,15 @@ c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void) check_for_unit_attention(h, c); } + /* * ioctl */ -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) +static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) { struct ctlr_info *h; void __user *argp = (void __user *)arg; + int rc; h = sdev_to_hba(dev); @@ -3223,22 +6537,29 @@ case CCISS_GETDRIVVER: return hpsa_getdrivver_ioctl(h, argp); case CCISS_PASSTHRU: - return hpsa_passthru_ioctl(h, argp); + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + rc = hpsa_passthru_ioctl(h, argp); + atomic_inc(&h->passthru_cmds_avail); + return rc; case CCISS_BIG_PASSTHRU: - return hpsa_big_passthru_ioctl(h, argp); + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + rc = hpsa_big_passthru_ioctl(h, argp); + atomic_inc(&h->passthru_cmds_avail); + return rc; default: return -ENOTTY; } } -static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, +static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, u8 reset_type) { struct CommandList *c; c = cmd_alloc(h); - if (!c) - return -ENOMEM; + /* fill_cmd can't fail here, no data buffer to map */ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, RAID_CTLR_LUNID, TYPE_MSG); @@ -3249,40 +6570,39 @@ * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. 
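
/*
 * Editorial sketch, not part of the patch: the admission control that
 * hpsa_ioctl() above applies to passthru commands. A shared counter caps
 * how many may be in flight, and losers get -EAGAIN instead of blocking
 * so user space can retry. do_passthru() is a hypothetical stand-in for
 * the hpsa_passthru_ioctl()/hpsa_big_passthru_ioctl() calls.
 */
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>

extern int do_passthru(void __user *argp);

static int throttled_passthru(atomic_t *avail, void __user *argp)
{
	int rc;

	if (atomic_dec_if_positive(avail) < 0)
		return -EAGAIN;	/* all slots busy: caller should retry */
	rc = do_passthru(argp);
	atomic_inc(avail);	/* always give the slot back */
	return rc;
}
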
*/ - return 0; + return; } static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, - void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, + void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type) { int pci_dir = XFER_NONE; - struct CommandList *a; /* for commands to be aborted */ + u64 tag; /* for commands to be aborted */ c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; if (buff != NULL && size > 0) { c->Header.SGList = 1; - c->Header.SGTotal = 1; + c->Header.SGTotal = cpu_to_le16(1); } else { c->Header.SGList = 0; - c->Header.SGTotal = 0; + c->Header.SGTotal = cpu_to_le16(0); } - c->Header.Tag.lower = c->busaddr; memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); - c->Request.Type.Type = cmd_type; if (cmd_type == TYPE_CMD) { switch (cmd) { case HPSA_INQUIRY: /* are we trying to read a vital product page */ - if (page_code != 0) { + if (page_code & VPD_PAGE) { c->Request.CDB[1] = 0x01; - c->Request.CDB[2] = page_code; + c->Request.CDB[2] = (page_code & 0xff); } c->Request.CDBLen = 6; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = HPSA_INQUIRY; c->Request.CDB[4] = size & 0xFF; @@ -3293,8 +6613,8 @@ mode = 00 target = 0. Nothing to write. */ c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ @@ -3302,10 +6622,29 @@ c->Request.CDB[8] = (size >> 8) & 0xFF; c->Request.CDB[9] = size & 0xFF; break; + case BMIC_SENSE_DIAG_OPTIONS: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + /* Spec says this should be BMIC_WRITE */ + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; + break; + case BMIC_SET_DIAG_OPTIONS: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_WRITE; + c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; + break; case HPSA_CACHE_FLUSH: c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_WRITE; c->Request.CDB[6] = BMIC_CACHE_FLUSH; @@ -3314,9 +6653,67 @@ break; case TEST_UNIT_READY: c->Request.CDBLen = 6; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_NONE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); + c->Request.Timeout = 0; + break; + case HPSA_GET_RAID_MAP: + c->Request.CDBLen = 12; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = HPSA_CISS_READ; + c->Request.CDB[1] = cmd; + c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + c->Request.CDB[9] = size & 0xFF; + break; + case BMIC_SENSE_CONTROLLER_PARAMETERS: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + 
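
/*
 * Editorial sketch, not part of the patch: what the TYPE_ATTR_DIR() and
 * GET_DIR() macros used throughout fill_cmd() here accomplish. The old
 * separate Type/Attribute/Direction fields are packed into a single
 * byte. The shift values below are illustrative only; the real layout
 * is defined in hpsa_cmd.h.
 */
#define MY_TYPE_SHIFT	0	/* bits 0-1: command vs. message */
#define MY_ATTR_SHIFT	2	/* bits 2-4: queueing attribute */
#define MY_DIR_SHIFT	6	/* bits 6-7: transfer direction */

#define MY_TYPE_ATTR_DIR(t, a, d)			\
	((((t) & 0x3) << MY_TYPE_SHIFT) |		\
	 (((a) & 0x7) << MY_ATTR_SHIFT) |		\
	 (((d) & 0x3) << MY_DIR_SHIFT))
#define MY_GET_DIR(tad)	(((tad) >> MY_DIR_SHIFT) & 0x3)

/* Usage: request->type_attr_dir = MY_TYPE_ATTR_DIR(type, attr, dir); */
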
c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + break; + case BMIC_IDENTIFY_PHYSICAL_DEVICE: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + break; + case BMIC_SENSE_SUBSYSTEM_INFORMATION: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + break; + case BMIC_IDENTIFY_CONTROLLER: + c->Request.CDBLen = 10; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[1] = 0; + c->Request.CDB[2] = 0; + c->Request.CDB[3] = 0; + c->Request.CDB[4] = 0; + c->Request.CDB[5] = 0; + c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0XFF; + c->Request.CDB[9] = 0; break; default: dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); @@ -3326,11 +6723,24 @@ } else if (cmd_type == TYPE_MSG) { switch (cmd) { + case HPSA_PHYS_TARGET_RESET: + c->Request.CDBLen = 16; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); + c->Request.Timeout = 0; /* Don't time out */ + memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); + c->Request.CDB[0] = HPSA_RESET; + c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; + /* Physical target reset needs no control bytes 4-7*/ + c->Request.CDB[4] = 0x00; + c->Request.CDB[5] = 0x00; + c->Request.CDB[6] = 0x00; + c->Request.CDB[7] = 0x00; + break; case HPSA_DEVICE_RESET_MSG: c->Request.CDBLen = 16; - c->Request.Type.Type = 1; /* It is a MSG not a CMD */ - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_NONE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); c->Request.Timeout = 0; /* Don't time out */ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; @@ -3343,28 +6753,21 @@ c->Request.CDB[7] = 0x00; break; case HPSA_ABORT_MSG: - a = buff; /* point to command to be aborted */ - dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n", - a->Header.Tag.upper, a->Header.Tag.lower, - c->Header.Tag.upper, c->Header.Tag.lower); + memcpy(&tag, buff, sizeof(tag)); + dev_dbg(&h->pdev->dev, + "Abort Tag:0x%016llx using rqst Tag:0x%016llx", + tag, c->Header.tag); c->Request.CDBLen = 16; - c->Request.Type.Type = TYPE_MSG; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); c->Request.Timeout = 0; /* Don't time out */ c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; c->Request.CDB[2] = 0x00; /* reserved */ c->Request.CDB[3] = 0x00; /* reserved */ /* Tag to abort goes in CDB[4]-CDB[11] */ - c->Request.CDB[4] = a->Header.Tag.lower & 0xFF; - c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF; - c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF; - c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF; - c->Request.CDB[8] = a->Header.Tag.upper & 
0xFF; - c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF; - c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF; - c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF; + memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); c->Request.CDB[12] = 0x00; /* reserved */ c->Request.CDB[13] = 0x00; /* reserved */ c->Request.CDB[14] = 0x00; /* reserved */ @@ -3380,7 +6783,7 @@ BUG(); } - switch (c->Request.Type.Direction) { + switch (GET_DIR(c->Request.type_attr_dir)) { case XFER_READ: pci_dir = PCI_DMA_FROMDEVICE; break; @@ -3411,46 +6814,6 @@ return page_remapped ? (page_remapped + page_offs) : NULL; } -/* Takes cmds off the submission queue and sends them to the hardware, - * then puts them on the queue of cmds waiting for completion. - */ -static void start_io(struct ctlr_info *h) -{ - struct CommandList *c; - unsigned long flags; - - spin_lock_irqsave(&h->lock, flags); - while (!list_empty(&h->reqQ)) { - c = list_entry(h->reqQ.next, struct CommandList, list); - /* can't do anything if fifo is full */ - if ((h->access.fifo_full(h))) { - dev_warn(&h->pdev->dev, "fifo full\n"); - break; - } - - /* Get the first entry from the Request Q */ - removeQ(c); - h->Qdepth--; - - /* Put job onto the completed Q */ - addQ(&h->cmpQ, c); - - /* Must increment commands_outstanding before unlocking - * and submitting to avoid race checking for fifo full - * condition. - */ - h->commands_outstanding++; - if (h->commands_outstanding > h->max_outstanding) - h->max_outstanding = h->commands_outstanding; - - /* Tell the controller execute command */ - spin_unlock_irqrestore(&h->lock, flags); - h->access.submit_command(h, c); - spin_lock_irqsave(&h->lock, flags); - } - spin_unlock_irqrestore(&h->lock, flags); -} - static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) { return h->access.command_completed(h, q); @@ -3479,38 +6842,14 @@ static inline void finish_cmd(struct CommandList *c) { - unsigned long flags; - - spin_lock_irqsave(&c->h->lock, flags); - removeQ(c); - spin_unlock_irqrestore(&c->h->lock, flags); dial_up_lockup_detection_on_fw_flash_complete(c->h, c); - if (likely(c->cmd_type == CMD_SCSI)) + if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI + || c->cmd_type == CMD_IOACCEL2)) complete_scsi_command(c); - else if (c->cmd_type == CMD_IOCTL_PEND) + else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) complete(c->waiting); } -static inline u32 hpsa_tag_contains_index(u32 tag) -{ - return tag & DIRECT_LOOKUP_BIT; -} - -static inline u32 hpsa_tag_to_index(u32 tag) -{ - return tag >> DIRECT_LOOKUP_SHIFT; -} - - -static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) -{ -#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) -#define HPSA_SIMPLE_ERROR_BITS 0x03 - if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) - return tag & ~HPSA_SIMPLE_ERROR_BITS; - return tag & ~HPSA_PERF_ERROR_BITS; -} - /* process completion of an indexed ("direct lookup") command */ static inline void process_indexed_cmd(struct ctlr_info *h, u32 raw_tag) @@ -3518,34 +6857,13 @@ u32 tag_index; struct CommandList *c; - tag_index = hpsa_tag_to_index(raw_tag); + tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; if (!bad_tag(h, tag_index, raw_tag)) { c = h->cmd_pool + tag_index; finish_cmd(c); } } -/* process completion of a non-indexed command */ -static inline void process_nonindexed_cmd(struct ctlr_info *h, - u32 raw_tag) -{ - u32 tag; - struct CommandList *c = NULL; - unsigned long flags; - - tag = hpsa_tag_discard_error_bits(h, raw_tag); - 
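
/*
 * Editorial sketch, not part of the patch: the direct-lookup tag scheme
 * that lets the old process_nonindexed_cmd() search here be deleted.
 * The command's pool index is carried in the upper bits of the
 * completion tag, so the interrupt handler can index straight into the
 * pool instead of walking a queue. The shift value is illustrative,
 * not the hardware's.
 */
#include <linux/types.h>

#define MY_LOOKUP_SHIFT	5	/* low bits carry error/status flags */

static inline u32 my_make_tag(u32 pool_index)
{
	return pool_index << MY_LOOKUP_SHIFT;
}

static inline u32 my_tag_to_index(u32 raw_tag)
{
	return raw_tag >> MY_LOOKUP_SHIFT;	/* flag bits shift away */
}
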
spin_lock_irqsave(&h->lock, flags); - list_for_each_entry(c, &h->cmpQ, list) { - if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { - spin_unlock_irqrestore(&h->lock, flags); - finish_cmd(c); - return; - } - } - spin_unlock_irqrestore(&h->lock, flags); - bad_tag(h, h->nr_cmds + 1, raw_tag); -} - /* Some controllers, like p400, will give us one interrupt * after a soft reset, even if we turned interrupts off. * Only need to check for this in the hpsa_xxx_discard_completions @@ -3623,10 +6941,7 @@ while (interrupt_pending(h)) { raw_tag = get_next_completion(h, q); while (raw_tag != FIFO_EMPTY) { - if (likely(hpsa_tag_contains_index(raw_tag))) - process_indexed_cmd(h, raw_tag); - else - process_nonindexed_cmd(h, raw_tag); + process_indexed_cmd(h, raw_tag); raw_tag = next_command(h, q); } } @@ -3642,10 +6957,7 @@ h->last_intr_timestamp = get_jiffies_64(); raw_tag = get_next_completion(h, q); while (raw_tag != FIFO_EMPTY) { - if (likely(hpsa_tag_contains_index(raw_tag))) - process_indexed_cmd(h, raw_tag); - else - process_nonindexed_cmd(h, raw_tag); + process_indexed_cmd(h, raw_tag); raw_tag = next_command(h, q); } return IRQ_HANDLED; @@ -3667,7 +6979,8 @@ static const size_t cmd_sz = sizeof(*cmd) + sizeof(cmd->ErrorDescriptor); dma_addr_t paddr64; - uint32_t paddr32, tag; + __le32 paddr32; + u32 tag; void __iomem *vaddr; int i, err; @@ -3682,7 +6995,7 @@ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); - return -ENOMEM; + return err; } cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); @@ -3695,32 +7008,30 @@ * although there's no guarantee, we assume that the address is at * least 4-byte aligned (most likely, it's page-aligned). */ - paddr32 = paddr64; + paddr32 = cpu_to_le32(paddr64); cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; - cmd->CommandHeader.SGTotal = 0; - cmd->CommandHeader.Tag.lower = paddr32; - cmd->CommandHeader.Tag.upper = 0; + cmd->CommandHeader.SGTotal = cpu_to_le16(0); + cmd->CommandHeader.tag = cpu_to_le64(paddr64); memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; - cmd->Request.Type.Type = TYPE_MSG; - cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; - cmd->Request.Type.Direction = XFER_NONE; + cmd->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ - cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); - cmd->ErrorDescriptor.Addr.upper = 0; - cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); + cmd->ErrorDescriptor.Addr = + cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); + cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); - writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); + writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); - if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) + if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) break; msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); } @@ -3752,10 +7063,8 @@ #define hpsa_noop(p) hpsa_message(p, 3, 0) static int hpsa_controller_hard_reset(struct pci_dev *pdev, - void * __iomem vaddr, u32 use_doorbell) + void __iomem *vaddr, u32 use_doorbell) { - u16 pmcsr; - int pos; if (use_doorbell) { /* For everything after the P600, the PCI power state method @@ -3764,6 +7073,13 @@ */ dev_info(&pdev->dev, "using doorbell to 
reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); + + /* PMC hardware guys tell us we need a 10 second delay after + * doorbell reset and before any attempt to talk to the board + * at all to ensure that this actually works and doesn't fall + * over in some weird corner cases. + */ + msleep(10000); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power @@ -3774,26 +7090,21 @@ * this causes a secondary PCI reset which will reset the * controller." */ - pos = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pos == 0) { - dev_err(&pdev->dev, - "hpsa_reset_controller: " - "PCI PM not supported\n"); - return -ENODEV; - } + int rc = 0; + dev_info(&pdev->dev, "using PCI PM to reset controller\n"); + /* enter the D3hot power management state */ - pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= PCI_D3hot; - pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); + rc = pci_set_power_state(pdev, PCI_D3hot); + if (rc) + return rc; msleep(500); /* enter the D0 power management state */ - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= PCI_D0; - pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); + rc = pci_set_power_state(pdev, PCI_D0); + if (rc) + return rc; /* * The P600 requires a small delay when changing states. @@ -3859,7 +7170,7 @@ /* This does a hard reset of the controller using PCI power management * states or the using the doorbell register. */ -static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) +static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) { u64 cfg_offset; u32 cfg_base_addr; @@ -3870,7 +7181,6 @@ int rc; struct CfgTable __iomem *cfgtable; u32 use_doorbell; - u32 board_id; u16 command_register; /* For controllers as old as the P600, this is very nearly @@ -3886,9 +7196,8 @@ * using the doorbell register. */ - rc = hpsa_lookup_board_id(pdev, &board_id); - if (rc < 0 || !ctlr_is_resettable(board_id)) { - dev_warn(&pdev->dev, "Not resetting device.\n"); + if (!ctlr_is_resettable(board_id)) { + dev_warn(&pdev->dev, "Controller not resettable\n"); return -ENODEV; } @@ -3921,7 +7230,7 @@ } rc = write_driver_ver_to_cfgtable(cfgtable); if (rc) - goto unmap_vaddr; + goto unmap_cfgtable; /* If reset via doorbell register is supported, use that. * There are two such methods. Favor the newest method. @@ -3933,8 +7242,8 @@ } else { use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; if (use_doorbell) { - dev_warn(&pdev->dev, "Soft reset not supported. " - "Firmware update is required.\n"); + dev_warn(&pdev->dev, + "Soft reset not supported. Firmware update is required.\n"); rc = -ENOTSUPP; /* try soft reset */ goto unmap_cfgtable; } @@ -3951,21 +7260,10 @@ need a little pause here */ msleep(HPSA_POST_RESET_PAUSE_MSECS); - /* Wait for board to become not ready, then ready. */ - dev_info(&pdev->dev, "Waiting for board to reset.\n"); - rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); - if (rc) { - dev_warn(&pdev->dev, - "failed waiting for board to reset." - " Will try soft reset.\n"); - rc = -ENOTSUPP; /* Not expected, but try soft reset later */ - goto unmap_cfgtable; - } rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, - "failed waiting for board to become ready " - "after hard reset\n"); + "Failed waiting for board to become ready after hard reset\n"); goto unmap_cfgtable; } @@ -3993,7 +7291,7 @@ * the io functions. * This is for debug only. 
*/ -static void print_cfg_table(struct device *dev, struct CfgTable *tb) +static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) { #ifdef HPSA_DEBUG int i; @@ -4016,7 +7314,7 @@ readl(&(tb->HostWrite.CoalIntDelay))); dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", readl(&(tb->HostWrite.CoalIntCount))); - dev_info(dev, " Max outstanding commands = 0x%d\n", + dev_info(dev, " Max outstanding commands = %d\n", readl(&(tb->CmdsOutMax))); dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); for (i = 0; i < 16; i++) @@ -4063,10 +7361,22 @@ return -1; } +static void hpsa_disable_interrupt_mode(struct ctlr_info *h) +{ + if (h->msix_vector) { + if (h->pdev->msix_enabled) + pci_disable_msix(h->pdev); + h->msix_vector = 0; + } else if (h->msi_vector) { + if (h->pdev->msi_enabled) + pci_disable_msi(h->pdev); + h->msi_vector = 0; + } +} + /* If MSI/MSI-X is supported by the kernel we will try to enable it on - * controllers that are capable. If not, we use IO-APIC mode. + * controllers that are capable. If not, we use legacy INTx mode. */ - static void hpsa_interrupt_mode(struct ctlr_info *h) { #ifdef CONFIG_PCI_MSI @@ -4083,27 +7393,28 @@ (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) goto default_int_mode; if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { - dev_info(&h->pdev->dev, "MSIX\n"); - err = pci_enable_msix(h->pdev, hpsa_msix_entries, - MAX_REPLY_QUEUES); - if (!err) { - for (i = 0; i < MAX_REPLY_QUEUES; i++) - h->intr[i] = hpsa_msix_entries[i].vector; - h->msix_vector = 1; - return; - } - if (err > 0) { + dev_info(&h->pdev->dev, "MSI-X capable controller\n"); + h->msix_vector = MAX_REPLY_QUEUES; + if (h->msix_vector > num_online_cpus()) + h->msix_vector = num_online_cpus(); + err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, + 1, h->msix_vector); + if (err < 0) { + dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); + h->msix_vector = 0; + goto single_msi_mode; + } else if (err < h->msix_vector) { dev_warn(&h->pdev->dev, "only %d MSI-X vectors " "available\n", err); - goto default_int_mode; - } else { - dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", - err); - goto default_int_mode; } + h->msix_vector = err; + for (i = 0; i < h->msix_vector; i++) + h->intr[i] = hpsa_msix_entries[i].vector; + return; } +single_msi_mode: if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { - dev_info(&h->pdev->dev, "MSI\n"); + dev_info(&h->pdev->dev, "MSI capable controller\n"); if (!pci_enable_msi(h->pdev)) h->msi_vector = 1; else @@ -4196,6 +7507,21 @@ return 0; } +static void hpsa_free_cfgtables(struct ctlr_info *h) +{ + if (h->transtable) { + iounmap(h->transtable); + h->transtable = NULL; + } + if (h->cfgtable) { + iounmap(h->cfgtable); + h->cfgtable = NULL; + } +} + +/* Find and map CISS config table and transfer table ++ * several items must be unmapped (freed) later ++ * */ static int hpsa_find_cfgtables(struct ctlr_info *h) { u64 cfg_offset; @@ -4210,8 +7536,10 @@ return rc; h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); - if (!h->cfgtable) + if (!h->cfgtable) { + dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); return -ENOMEM; + } rc = write_driver_ver_to_cfgtable(h->cfgtable); if (rc) return rc; @@ -4220,28 +7548,43 @@ h->transtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index)+cfg_offset+trans_offset, sizeof(*h->transtable)); - if (!h->transtable) + if (!h->transtable) { + dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); + 
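
/*
 * Editorial sketch, not part of the patch: the idempotent-teardown idiom
 * behind hpsa_free_cfgtables() above. Unmap only what is mapped, then
 * NULL the pointer so the helper is safe to call again from any
 * partially-failed init path. 'struct my_maps' is a hypothetical
 * container for the two mappings.
 */
#include <linux/io.h>

struct my_maps {
	void __iomem *cfg;
	void __iomem *trans;
};

static void free_maps(struct my_maps *m)
{
	if (m->trans) {
		iounmap(m->trans);
		m->trans = NULL;	/* a second call is now a no-op */
	}
	if (m->cfg) {
		iounmap(m->cfg);
		m->cfg = NULL;
	}
}
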
hpsa_free_cfgtables(h); return -ENOMEM; + } return 0; } static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { - h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); +#define MIN_MAX_COMMANDS 16 + BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); + + h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); /* Limit commands in memory limited kdump scenario. */ if (reset_devices && h->max_commands > 32) h->max_commands = 32; - if (h->max_commands < 16) { - dev_warn(&h->pdev->dev, "Controller reports " - "max supported commands of %d, an obvious lie. " - "Using 16. Ensure that firmware is up to date.\n", - h->max_commands); - h->max_commands = 16; + if (h->max_commands < MIN_MAX_COMMANDS) { + dev_warn(&h->pdev->dev, + "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n", + h->max_commands, + MIN_MAX_COMMANDS); + h->max_commands = MIN_MAX_COMMANDS; } } +/* If the controller reports that the total max sg entries is greater than 512, + * then we know that chained SG blocks work. (Original smart arrays did not + * support chained SG blocks and would return zero for max sg entries.) + */ +static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) +{ + return h->maxsgentries > 512; +} + /* Interrogate the hardware for some limits: * max commands, max SG elements without chaining, and with chaining, * SG chain block size, etc. @@ -4249,45 +7592,55 @@ static void hpsa_find_board_params(struct ctlr_info *h) { hpsa_get_max_perf_mode_cmds(h); - h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ + h->nr_cmds = h->max_commands; h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); - /* - * Limit in-command s/g elements to 32 save dma'able memory. - * Howvever spec says if 0, use 31 - */ - h->max_cmd_sg_entries = 31; - if (h->maxsgentries > 512) { + h->fw_support = readl(&(h->cfgtable->misc_fw_support)); + if (hpsa_supports_chained_sg_blocks(h)) { + /* Limit in-command s/g elements to 32 to save dma'able memory. 
*/ h->max_cmd_sg_entries = 32; - h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; + h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; h->maxsgentries--; /* save one for chain pointer */ } else { + /* + * Original smart arrays supported at most 31 s/g entries + * embedded inline in the command (trying to use more + * would lock up the controller) + */ + h->max_cmd_sg_entries = 31; h->maxsgentries = 31; /* default to traditional values */ h->chainsize = 0; } /* Find out what task management functions are supported and cache */ h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); + if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); + if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); + if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); } static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) { if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { - dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); + dev_err(&h->pdev->dev, "not a valid CISS config table\n"); return false; } return true; } -/* Need to enable prefetch in the SCSI core for 6400 in x86 */ -static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) +static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) { -#ifdef CONFIG_X86 - u32 prefetch; + u32 driver_support; - prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); - prefetch |= 0x100; - writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); + driver_support = readl(&(h->cfgtable->driver_support)); + /* Need to enable prefetch in the SCSI core for 6400 in x86 */ +#ifdef CONFIG_X86 + driver_support |= ENABLE_SCSI_PREFETCH; #endif + driver_support |= ENABLE_UNIT_ATTN; + writel(driver_support, &(h->cfgtable->driver_support)); } /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result @@ -4304,7 +7657,27 @@ writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); } -static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) +static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) +{ + int i; + u32 doorbell_value; + unsigned long flags; + /* wait until the clear_event_notify bit 6 is cleared by controller. */ + for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { + spin_lock_irqsave(&h->lock, flags); + doorbell_value = readl(h->vaddr + SA5_DOORBELL); + spin_unlock_irqrestore(&h->lock, flags); + if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) + goto done; + /* delay and try again */ + msleep(CLEAR_EVENT_WAIT_INTERVAL); + } + return -ENODEV; +done: + return 0; +} + +static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) { int i; u32 doorbell_value; @@ -4314,17 +7687,23 @@ * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) 
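
/*
 * Editorial sketch, not part of the patch: the acknowledge-poll pattern
 * shared by hpsa_wait_for_clear_event_notify_ack() and
 * hpsa_wait_for_mode_change_ack() above. Re-read a doorbell register
 * until the controller drops a bit, sleeping between reads, and fail
 * with -ENODEV if it never does. Offset, bit, and budget are
 * illustrative values.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define MY_DOORBELL_OFF	0x20
#define MY_ACK_BIT	(1 << 0)
#define MY_POLL_MS	10
#define MY_MAX_POLLS	2000	/* 10 ms x 2000 = 20 s budget */

static int wait_for_ack(void __iomem *vaddr)
{
	int i;

	for (i = 0; i < MY_MAX_POLLS; i++) {
		if (!(readl(vaddr + MY_DOORBELL_OFF) & MY_ACK_BIT))
			return 0;	/* controller acknowledged */
		msleep(MY_POLL_MS);
	}
	return -ENODEV;			/* controller never answered */
}
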
*/ - for (i = 0; i < MAX_CONFIG_WAIT; i++) { + for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { + if (h->remove_in_progress) + goto done; spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); if (!(doorbell_value & CFGTBL_ChangeReq)) - break; + goto done; /* delay and try again */ - usleep_range(10000, 20000); + msleep(MODE_CHANGE_WAIT_INTERVAL); } + return -ENODEV; +done: + return 0; } +/* return -ENODEV or other reason on error, 0 on success */ static int hpsa_enter_simple_mode(struct ctlr_info *h) { u32 trans_support; @@ -4334,83 +7713,112 @@ return -ENOTSUPP; h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); + /* Update the field, and then ring the doorbell */ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); + writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - hpsa_wait_for_mode_change_ack(h); + if (hpsa_wait_for_mode_change_ack(h)) + goto error; print_cfg_table(&h->pdev->dev, h->cfgtable); - if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { - dev_warn(&h->pdev->dev, - "unable to get board into simple mode\n"); - return -ENODEV; - } + if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) + goto error; h->transMethod = CFGTBL_Trans_Simple; return 0; +error: + dev_err(&h->pdev->dev, "failed to enter simple mode\n"); + return -ENODEV; } +/* free items allocated or mapped by hpsa_pci_init */ +static void hpsa_free_pci_init(struct ctlr_info *h) +{ + hpsa_free_cfgtables(h); /* pci_init 4 */ + iounmap(h->vaddr); /* pci_init 3 */ + h->vaddr = NULL; + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ + /* + * call pci_disable_device before pci_release_regions per + * Documentation/PCI/pci.txt + */ + pci_disable_device(h->pdev); /* pci_init 1 */ + pci_release_regions(h->pdev); /* pci_init 2 */ +} + +/* several items must be freed later */ static int hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); if (prod_index < 0) - return -ENODEV; + return prod_index; h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); + h->needs_abort_tags_swizzled = + ctlr_needs_abort_tags_swizzled(h->board_id); + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); err = pci_enable_device(h->pdev); if (err) { - dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); + dev_err(&h->pdev->dev, "failed to enable PCI device\n"); + pci_disable_device(h->pdev); return err; } - /* Enable bus mastering (pci_disable_device may disable this) */ - pci_set_master(h->pdev); - err = pci_request_regions(h->pdev, HPSA); if (err) { dev_err(&h->pdev->dev, - "cannot obtain PCI resources, aborting\n"); + "failed to obtain PCI resources\n"); + pci_disable_device(h->pdev); return err; } + + pci_set_master(h->pdev); + hpsa_interrupt_mode(h); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) - goto err_out_free_res; + goto clean2; /* intmode+region, pci */ h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { + dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); err = -ENOMEM; - goto err_out_free_res; + goto clean2; /* intmode+region, pci */ } err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) - goto err_out_free_res; + goto clean3; /* vaddr, intmode+region, pci */ err = hpsa_find_cfgtables(h); if (err) - goto err_out_free_res; + goto clean3; /* vaddr, 
intmode+region, pci */ hpsa_find_board_params(h); if (!hpsa_CISS_signature_present(h)) { err = -ENODEV; - goto err_out_free_res; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ } - hpsa_enable_scsi_prefetch(h); + hpsa_set_driver_support_bits(h); hpsa_p600_dma_prefetch_quirk(h); err = hpsa_enter_simple_mode(h); if (err) - goto err_out_free_res; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ return 0; -err_out_free_res: - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - if (h->vaddr) - iounmap(h->vaddr); +clean4: /* cfgtables, vaddr, intmode+region, pci */ + hpsa_free_cfgtables(h); +clean3: /* vaddr, intmode+region, pci */ + iounmap(h->vaddr); + h->vaddr = NULL; +clean2: /* intmode+region, pci */ + hpsa_disable_interrupt_mode(h); + /* + * call pci_disable_device before pci_release_regions per + * Documentation/PCI/pci.txt + */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); return err; @@ -4432,9 +7840,10 @@ } } -static int hpsa_init_reset_devices(struct pci_dev *pdev) +static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) { int rc, i; + void __iomem *vaddr; if (!reset_devices) return 0; @@ -4455,23 +7864,30 @@ dev_warn(&pdev->dev, "failed to enable device.\n"); return -ENODEV; } + pci_set_master(pdev); + + vaddr = pci_ioremap_bar(pdev, 0); + if (vaddr == NULL) { + rc = -ENOMEM; + goto out_disable; + } + writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); + iounmap(vaddr); + /* Reset the controller with a PCI power-cycle or via doorbell */ - rc = hpsa_kdump_hard_reset_controller(pdev); + rc = hpsa_kdump_hard_reset_controller(pdev, board_id); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in * "performant mode". Or, it might be 640x, which can't reset * due to concerns about shared bbwc between 6402/6404 pair. */ - if (rc) { - if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. 
*/ - rc = -ENODEV; + if (rc) goto out_disable; - } /* Now try to get the controller to respond to a no-op */ - dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); + dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { if (hpsa_noop(pdev) == 0) break; @@ -4486,7 +7902,29 @@ return rc; } -static int hpsa_allocate_cmd_pool(struct ctlr_info *h) +static void hpsa_free_cmd_pool(struct ctlr_info *h) +{ + kfree(h->cmd_pool_bits); + h->cmd_pool_bits = NULL; + if (h->cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct CommandList), + h->cmd_pool, + h->cmd_pool_dhandle); + h->cmd_pool = NULL; + h->cmd_pool_dhandle = 0; + } + if (h->errinfo_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct ErrorInfo), + h->errinfo_pool, + h->errinfo_pool_dhandle); + h->errinfo_pool = NULL; + h->errinfo_pool_dhandle = 0; + } +} + +static int hpsa_alloc_cmd_pool(struct ctlr_info *h) { h->cmd_pool_bits = kzalloc( DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * @@ -4501,26 +7939,51 @@ || (h->cmd_pool == NULL) || (h->errinfo_pool == NULL)) { dev_err(&h->pdev->dev, "out of memory in %s", __func__); - return -ENOMEM; + goto clean_up; } + hpsa_preinitialize_commands(h); return 0; +clean_up: + hpsa_free_cmd_pool(h); + return -ENOMEM; } -static void hpsa_free_cmd_pool(struct ctlr_info *h) +static void hpsa_irq_affinity_hints(struct ctlr_info *h) { - kfree(h->cmd_pool_bits); - if (h->cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - if (h->errinfo_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, - h->errinfo_pool_dhandle); + int i, cpu; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < h->msix_vector; i++) { + irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } } -static int hpsa_request_irq(struct ctlr_info *h, +/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ +static void hpsa_free_irqs(struct ctlr_info *h) +{ + int i; + + if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { + /* Single reply queue, only one irq to free */ + i = h->intr_mode; + irq_set_affinity_hint(h->intr[i], NULL); + free_irq(h->intr[i], &h->q[i]); + h->q[i] = 0; + return; + } + + for (i = 0; i < h->msix_vector; i++) { + irq_set_affinity_hint(h->intr[i], NULL); + free_irq(h->intr[i], &h->q[i]); + h->q[i] = 0; + } + for (; i < MAX_REPLY_QUEUES; i++) + h->q[i] = 0; +} + +/* returns 0 on success; cleans up and returns -Enn on error */ +static int hpsa_request_irqs(struct ctlr_info *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) { @@ -4533,27 +7996,56 @@ for (i = 0; i < MAX_REPLY_QUEUES; i++) h->q[i] = (u8) i; - if (h->intr_mode == PERF_MODE_INT && h->msix_vector) { + if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { /* If performant mode and MSI-X, use multiple reply queues */ - for (i = 0; i < MAX_REPLY_QUEUES; i++) + for (i = 0; i < h->msix_vector; i++) { + sprintf(h->intrname[i], "%s-msix%d", h->devname, i); rc = request_irq(h->intr[i], msixhandler, - 0, h->devname, + 0, h->intrname[i], &h->q[i]); + if (rc) { + int j; + + dev_err(&h->pdev->dev, + "failed to get irq %d for %s\n", + h->intr[i], h->devname); + for (j = 0; j < i; j++) { + free_irq(h->intr[j], &h->q[j]); + h->q[j] = 0; + } + for (; j < MAX_REPLY_QUEUES; j++) + h->q[j] = 0; + return rc; + } + } + hpsa_irq_affinity_hints(h); } else 
{ /* Use single reply pool */ - if (h->msix_vector || h->msi_vector) { + if (h->msix_vector > 0 || h->msi_vector) { + if (h->msix_vector) + sprintf(h->intrname[h->intr_mode], + "%s-msix", h->devname); + else + sprintf(h->intrname[h->intr_mode], + "%s-msi", h->devname); rc = request_irq(h->intr[h->intr_mode], - msixhandler, 0, h->devname, + msixhandler, 0, + h->intrname[h->intr_mode], &h->q[h->intr_mode]); } else { + sprintf(h->intrname[h->intr_mode], + "%s-intx", h->devname); rc = request_irq(h->intr[h->intr_mode], - intxhandler, IRQF_SHARED, h->devname, + intxhandler, IRQF_SHARED, + h->intrname[h->intr_mode], &h->q[h->intr_mode]); } + irq_set_affinity_hint(h->intr[h->intr_mode], NULL); } if (rc) { - dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", + dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", h->intr[h->intr_mode], h->devname); + hpsa_free_irqs(h); return -ENODEV; } return 0; @@ -4561,131 +8053,135 @@ static int hpsa_kdump_soft_reset(struct ctlr_info *h) { - if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, - HPSA_RESET_TYPE_CONTROLLER)) { - dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); - return -EIO; - } + int rc; + hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); - if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); + if (rc) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); - return -1; + return rc; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); - if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); + if (rc) { dev_warn(&h->pdev->dev, "Board failed to become ready " "after soft reset.\n"); - return -1; + return rc; } return 0; } -static void free_irqs(struct ctlr_info *h) +static void hpsa_free_reply_queues(struct ctlr_info *h) { int i; - if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { - /* Single reply queue, only one irq to free */ - i = h->intr_mode; - free_irq(h->intr[i], &h->q[i]); - return; + for (i = 0; i < h->nreply_queues; i++) { + if (!h->reply_queue[i].head) + continue; + pci_free_consistent(h->pdev, + h->reply_queue_size, + h->reply_queue[i].head, + h->reply_queue[i].busaddr); + h->reply_queue[i].head = NULL; + h->reply_queue[i].busaddr = 0; } - - for (i = 0; i < MAX_REPLY_QUEUES; i++) - free_irq(h->intr[i], &h->q[i]); + h->reply_queue_size = 0; } -static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) +static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { - free_irqs(h); -#ifdef CONFIG_PCI_MSI - if (h->msix_vector) { - if (h->pdev->msix_enabled) - pci_disable_msix(h->pdev); - } else if (h->msi_vector) { - if (h->pdev->msi_enabled) - pci_disable_msi(h->pdev); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + hpsa_free_irqs(h); /* init_one 4 */ + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + hpsa_free_pci_init(h); /* init_one 2_5 */ + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + if (h->resubmit_wq) { + destroy_workqueue(h->resubmit_wq); /* init_one 1 */ + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { + destroy_workqueue(h->rescan_ctlr_wq); + h->rescan_ctlr_wq = NULL; } -#endif /* CONFIG_PCI_MSI */ + kfree(h); /* init_one 1 */ } -static 
void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) +/* Called when controller lockup detected. */ +static void fail_all_outstanding_cmds(struct ctlr_info *h) { - hpsa_free_irqs_and_disable_msix(h); - hpsa_free_sg_chain_blocks(h); - hpsa_free_cmd_pool(h); - kfree(h->blockFetchTable); - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); - if (h->vaddr) - iounmap(h->vaddr); - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - pci_disable_device(h->pdev); - pci_release_regions(h->pdev); - kfree(h); -} + int i, refcount; + struct CommandList *c; + int failcount = 0; -static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h) -{ - assert_spin_locked(&lockup_detector_lock); - if (!hpsa_lockup_detector) - return; - if (h->lockup_detected) - return; /* already stopped the lockup detector */ - list_del(&h->lockup_list); + flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ + for (i = 0; i < h->nr_cmds; i++) { + c = h->cmd_pool + i; + refcount = atomic_inc_return(&c->refcount); + if (refcount > 1) { + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; + finish_cmd(c); + atomic_dec(&h->commands_outstanding); + failcount++; + } + cmd_free(h, c); + } + dev_warn(&h->pdev->dev, + "failed %d commands in fail_all\n", failcount); } -/* Called when controller lockup detected. */ -static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) +static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) { - struct CommandList *c = NULL; + int cpu; - assert_spin_locked(&h->lock); - /* Mark all outstanding commands as failed and complete them. */ - while (!list_empty(list)) { - c = list_entry(list->next, struct CommandList, list); - c->err_info->CommandStatus = CMD_HARDWARE_ERR; - finish_cmd(c); + for_each_online_cpu(cpu) { + u32 *lockup_detected; + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + *lockup_detected = value; } + wmb(); /* be sure the per-cpu variables are out to memory */ } static void controller_lockup_detected(struct ctlr_info *h) { unsigned long flags; + u32 lockup_detected; - assert_spin_locked(&lockup_detector_lock); - remove_ctlr_from_lockup_detector_list(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_lock_irqsave(&h->lock, flags); - h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + if (!lockup_detected) { + /* no heartbeat, but controller gave us a zero. */ + dev_warn(&h->pdev->dev, + "lockup detected after %d but scratchpad register is zero\n", + h->heartbeat_sample_interval / HZ); + lockup_detected = 0xffffffff; + } + set_lockup_detected_for_all_cpus(h, lockup_detected); spin_unlock_irqrestore(&h->lock, flags); - dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", - h->lockup_detected); + dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", + lockup_detected, h->heartbeat_sample_interval / HZ); pci_disable_device(h->pdev); - spin_lock_irqsave(&h->lock, flags); - fail_all_cmds_on_list(h, &h->cmpQ); - fail_all_cmds_on_list(h, &h->reqQ); - spin_unlock_irqrestore(&h->lock, flags); + fail_all_outstanding_cmds(h); } -static void detect_controller_lockup(struct ctlr_info *h) +static int detect_controller_lockup(struct ctlr_info *h) { u64 now; u32 heartbeat; unsigned long flags; - assert_spin_locked(&lockup_detector_lock); now = get_jiffies_64(); /* If we've received an interrupt recently, we're ok. 
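 * (A recent interrupt means the firmware just serviced a command, so the
 * heartbeat register need not be sampled at all on this pass.)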
*/ if (time_after64(h->last_intr_timestamp + (h->heartbeat_sample_interval), now)) - return; + return false; /* * If we've already checked the heartbeat recently, we're ok. @@ -4694,7 +8190,7 @@ */ if (time_after64(h->last_heartbeat_timestamp + (h->heartbeat_sample_interval), now)) - return; + return false; /* If heartbeat has not changed since we last looked, we're not ok. */ spin_lock_irqsave(&h->lock, flags); @@ -4702,76 +8198,204 @@ spin_unlock_irqrestore(&h->lock, flags); if (h->last_heartbeat == heartbeat) { controller_lockup_detected(h); - return; + return true; } /* We're ok. */ h->last_heartbeat = heartbeat; h->last_heartbeat_timestamp = now; + return false; } -static int detect_controller_lockup_thread(void *notused) +static void hpsa_ack_ctlr_events(struct ctlr_info *h) { - struct ctlr_info *h; - unsigned long flags; + int i; + char *event_type; - while (1) { - struct list_head *this, *tmp; + if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) + return; - schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL); - if (kthread_should_stop()) - break; - spin_lock_irqsave(&lockup_detector_lock, flags); - list_for_each_safe(this, tmp, &hpsa_ctlr_list) { - h = list_entry(this, struct ctlr_info, lockup_list); - detect_controller_lockup(h); - } - spin_unlock_irqrestore(&lockup_detector_lock, flags); + /* Ask the controller to clear the events we're handling. */ + if ((h->transMethod & (CFGTBL_Trans_io_accel1 + | CFGTBL_Trans_io_accel2)) && + (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || + h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { + + if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) + event_type = "state change"; + if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) + event_type = "configuration change"; + /* Stop sending new RAID offload reqs via the IO accelerator */ + scsi_block_requests(h->scsi_host); + for (i = 0; i < h->ndevices; i++) + h->dev[i]->offload_enabled = 0; + hpsa_drain_accel_commands(h); + /* Set 'accelerator path config change' bit */ + dev_warn(&h->pdev->dev, + "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", + h->events, event_type); + writel(h->events, &(h->cfgtable->clear_event_notify)); + /* Set the "clear event notify field update" bit 6 */ + writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); + /* Wait until ctlr clears 'clear event notify field', bit 6 */ + hpsa_wait_for_clear_event_notify_ack(h); + scsi_unblock_requests(h->scsi_host); + } else { + /* Acknowledge controller notification events. */ + writel(h->events, &(h->cfgtable->clear_event_notify)); + writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); + hpsa_wait_for_clear_event_notify_ack(h); +#if 0 + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + hpsa_wait_for_mode_change_ack(h); +#endif } - return 0; + return; } -static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h) +/* Check a register on the controller to see if there are configuration + * changes (added/changed/removed logical drives, etc.) which mean that + * we should rescan the controller for devices. + * Also check flag for driver-initiated rescan. 
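+ * Note that the event_notify register is only consulted when the firmware
+ * advertises MISC_FW_EVENT_NOTIFY support; otherwise only the driver's own
+ * drv_req_rescan flag can trigger a rescan.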
+ */ +static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) +{ + if (h->drv_req_rescan) { + h->drv_req_rescan = 0; + return 1; + } + + if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) + return 0; + + h->events = readl(&(h->cfgtable->event_notify)); + return h->events & RESCAN_REQUIRED_EVENT_BITS; +} + +/* + * Check if any of the offline devices have become ready + */ +static int hpsa_offline_devices_ready(struct ctlr_info *h) { unsigned long flags; + struct offline_device_entry *d; + struct list_head *this, *tmp; - h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; - spin_lock_irqsave(&lockup_detector_lock, flags); - list_add_tail(&h->lockup_list, &hpsa_ctlr_list); - spin_unlock_irqrestore(&lockup_detector_lock, flags); + spin_lock_irqsave(&h->offline_device_lock, flags); + list_for_each_safe(this, tmp, &h->offline_device_list) { + d = list_entry(this, struct offline_device_entry, + offline_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); + if (!hpsa_volume_offline(h, d->scsi3addr)) { + spin_lock_irqsave(&h->offline_device_lock, flags); + list_del(&d->offline_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return 1; + } + spin_lock_irqsave(&h->offline_device_lock, flags); + } + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return 0; } -static void start_controller_lockup_detector(struct ctlr_info *h) +static int hpsa_luns_changed(struct ctlr_info *h) { - /* Start the lockup detector thread if not already started */ - if (!hpsa_lockup_detector) { - spin_lock_init(&lockup_detector_lock); - hpsa_lockup_detector = - kthread_run(detect_controller_lockup_thread, - NULL, HPSA); + int rc = 1; /* assume there are changes */ + struct ReportLUNdata *logdev = NULL; + + /* if we can't find out if lun data has changed, + * assume that it has. + */ + + if (!h->lastlogicals) + goto out; + + logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); + if (!logdev) { + dev_warn(&h->pdev->dev, + "Out of memory, can't track lun changes.\n"); + goto out; } - if (!hpsa_lockup_detector) { + if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { dev_warn(&h->pdev->dev, - "Could not start lockup detector thread\n"); - return; + "report luns failed, can't track lun changes.\n"); + goto out; } - add_ctlr_to_lockup_detector_list(h); + if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { + dev_info(&h->pdev->dev, + "Lun changes detected.\n"); + memcpy(h->lastlogicals, logdev, sizeof(*logdev)); + goto out; + } else + rc = 0; /* no changes detected. 
*/ +out: + kfree(logdev); + return rc; } -static void stop_controller_lockup_detector(struct ctlr_info *h) +static void hpsa_rescan_ctlr_worker(struct work_struct *work) { unsigned long flags; + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, rescan_ctlr_work); - spin_lock_irqsave(&lockup_detector_lock, flags); - remove_ctlr_from_lockup_detector_list(h); - /* If the list of ctlr's to monitor is empty, stop the thread */ - if (list_empty(&hpsa_ctlr_list)) { - spin_unlock_irqrestore(&lockup_detector_lock, flags); - kthread_stop(hpsa_lockup_detector); - spin_lock_irqsave(&lockup_detector_lock, flags); - hpsa_lockup_detector = NULL; + + if (h->remove_in_progress) + return; + + if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { + scsi_host_get(h->scsi_host); + hpsa_ack_ctlr_events(h); + hpsa_scan_start(h->scsi_host); + scsi_host_put(h->scsi_host); + } else if (h->discovery_polling) { + hpsa_disable_rld_caching(h); + if (hpsa_luns_changed(h)) { + struct Scsi_Host *sh = NULL; + + dev_info(&h->pdev->dev, + "driver discovery polling rescan.\n"); + sh = scsi_host_get(h->scsi_host); + if (sh != NULL) { + hpsa_scan_start(sh); + scsi_host_put(sh); + } + } } - spin_unlock_irqrestore(&lockup_detector_lock, flags); + spin_lock_irqsave(&h->lock, flags); + if (!h->remove_in_progress) + queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, + h->heartbeat_sample_interval); + spin_unlock_irqrestore(&h->lock, flags); +} + +static void hpsa_monitor_ctlr_worker(struct work_struct *work) +{ + unsigned long flags; + struct ctlr_info *h = container_of(to_delayed_work(work), + struct ctlr_info, monitor_ctlr_work); + + detect_controller_lockup(h); + if (lockup_detected(h)) + return; + + spin_lock_irqsave(&h->lock, flags); + if (!h->remove_in_progress) + schedule_delayed_work(&h->monitor_ctlr_work, + h->heartbeat_sample_interval); + spin_unlock_irqrestore(&h->lock, flags); +} + +static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, + char *name) +{ + struct workqueue_struct *wq = NULL; + + wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); + if (!wq) + dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); + + return wq; } static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -4780,11 +8404,18 @@ struct ctlr_info *h; int try_soft_reset = 0; unsigned long flags; + u32 board_id; if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); - rc = hpsa_init_reset_devices(pdev); + rc = hpsa_lookup_board_id(pdev, &board_id); + if (rc < 0) { + dev_warn(&pdev->dev, "Board ID not found\n"); + return rc; + } + + rc = hpsa_init_reset_devices(pdev, board_id); if (rc) { if (rc != -ENOTSUPP) return rc; @@ -4803,23 +8434,43 @@ * the 5 lower bits of the address are used by the hardware. and by * the driver. See comments in hpsa.h for more info. */ -#define COMMANDLIST_ALIGNMENT 32 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = kzalloc(sizeof(*h), GFP_KERNEL); - if (!h) + if (!h) { + dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; + } h->pdev = pdev; + h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; - INIT_LIST_HEAD(&h->cmpQ); - INIT_LIST_HEAD(&h->reqQ); + INIT_LIST_HEAD(&h->offline_device_list); spin_lock_init(&h->lock); + spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); + atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); + atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); + + /* Allocate and clear per-cpu variable lockup_detected */ + h->lockup_detected = alloc_percpu(u32); + if (!h->lockup_detected) { + dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); + rc = -ENOMEM; + goto clean1; /* aer/h */ + } + set_lockup_detected_for_all_cpus(h, 0); + rc = hpsa_pci_init(h); - if (rc != 0) - goto clean1; + if (rc) + goto clean2; /* lu, aer/h */ + + /* relies on h-> settings made by hpsa_pci_init, including + * interrupt_mode h->intr */ + rc = hpsa_scsi_host_alloc(h); + if (rc) + goto clean2_5; /* pci, lu, aer/h */ - sprintf(h->devname, HPSA "%d", number_of_controllers); + sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); h->ctlr = number_of_controllers; number_of_controllers++; @@ -4833,32 +8484,56 @@ dac = 0; } else { dev_err(&pdev->dev, "no suitable DMA available\n"); - goto clean1; + goto clean3; /* shost, pci, lu, aer/h */ } } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, HPSA_INTR_OFF); - if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) - goto clean2; - dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", - h->devname, pdev->device, - h->intr[h->intr_mode], dac ? "" : " not"); - if (hpsa_allocate_cmd_pool(h)) - goto clean4; - if (hpsa_allocate_sg_chain_blocks(h)) - goto clean4; + rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); + if (rc) + goto clean3; /* shost, pci, lu, aer/h */ + rc = hpsa_alloc_cmd_pool(h); + if (rc) + goto clean4; /* irq, shost, pci, lu, aer/h */ + rc = hpsa_alloc_sg_chain_blocks(h); + if (rc) + goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ init_waitqueue_head(&h->scan_wait_queue); + init_waitqueue_head(&h->abort_cmd_wait_queue); + init_waitqueue_head(&h->event_sync_wait_queue); + mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ pci_set_drvdata(pdev, h); h->ndevices = 0; - h->scsi_host = NULL; + spin_lock_init(&h->devlock); - hpsa_put_ctlr_into_performant_mode(h); + rc = hpsa_put_ctlr_into_performant_mode(h); + if (rc) + goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ - /* At this point, the controller is ready to take commands. + /* hook into SCSI subsystem */ + rc = hpsa_scsi_add_host(h); + if (rc) + goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + + /* create the resubmit workqueue */ + h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); + if (!h->rescan_ctlr_wq) { + rc = -ENOMEM; + goto clean7; + } + + h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); + if (!h->resubmit_wq) { + rc = -ENOMEM; + goto clean7; /* aer/h */ + } + + /* + * At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. 
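 * Before issuing the soft reset, the real interrupt handlers are swapped
 * for ones that simply discard completions, so that stale responses left
 * over from before the reset are never mistaken for live ones.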
*/ @@ -4874,19 +8549,30 @@ spin_lock_irqsave(&h->lock, flags); h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); - free_irqs(h); - rc = hpsa_request_irq(h, hpsa_msix_discard_completions, + hpsa_free_irqs(h); + rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, hpsa_intx_discard_completions); if (rc) { - dev_warn(&h->pdev->dev, "Failed to request_irq after " - "soft reset.\n"); - goto clean4; + dev_warn(&h->pdev->dev, + "Failed to request_irq after soft reset.\n"); + /* + * cannot goto clean7 or free_irqs will be called + * again. Instead, do its work + */ + hpsa_free_performant_mode(h); /* clean7 */ + hpsa_free_sg_chain_blocks(h); /* clean6 */ + hpsa_free_cmd_pool(h); /* clean5 */ + /* + * skip hpsa_free_irqs(h) clean4 since that + * was just called before request_irqs failed + */ + goto clean3; } rc = hpsa_kdump_soft_reset(h); if (rc) /* Neither hard nor soft reset worked, we're hosed. */ - goto clean4; + goto clean7; dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, @@ -4907,26 +8593,66 @@ hpsa_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) - /* don't go to clean4, we already unallocated */ + /* don't goto clean, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } + /* Enable Accelerated IO path at driver layer */ + h->acciopath_status = 1; + /* Disable discovery polling.*/ + h->discovery_polling = 0; + + /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); - hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ - start_controller_lockup_detector(h); + + h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); + if (!h->lastlogicals) + dev_info(&h->pdev->dev, + "Can't track change to report lun data\n"); + + /* Monitor the controller for firmware lockups */ + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; + INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); + schedule_delayed_work(&h->monitor_ctlr_work, + h->heartbeat_sample_interval); + INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); + queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, + h->heartbeat_sample_interval); return 0; -clean4: +clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + hpsa_free_performant_mode(h); + h->access.set_intr_mask(h, HPSA_INTR_OFF); +clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ hpsa_free_sg_chain_blocks(h); +clean5: /* cmd, irq, shost, pci, lu, aer/h */ hpsa_free_cmd_pool(h); - free_irqs(h); -clean2: -clean1: +clean4: /* irq, shost, pci, lu, aer/h */ + hpsa_free_irqs(h); +clean3: /* shost, pci, lu, aer/h */ + scsi_host_put(h->scsi_host); + h->scsi_host = NULL; +clean2_5: /* pci, lu, aer/h */ + hpsa_free_pci_init(h); +clean2: /* lu, aer/h */ + if (h->lockup_detected) { + free_percpu(h->lockup_detected); + h->lockup_detected = NULL; + } +clean1: /* wq/aer/h */ + if (h->resubmit_wq) { + destroy_workqueue(h->resubmit_wq); + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { + destroy_workqueue(h->rescan_ctlr_wq); + h->rescan_ctlr_wq = NULL; + } kfree(h); return rc; } @@ -4935,30 +8661,97 @@ { char *flush_buf; struct CommandList *c; + int rc; + if (unlikely(lockup_detected(h))) + return; flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) return; - c = cmd_special_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); - goto out_of_memory; - } + c = cmd_alloc(h); + if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 
RAID_CTLR_LUNID, TYPE_CMD)) { goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_TODEVICE, NO_TIMEOUT); + if (rc) + goto out; if (c->err_info->CommandStatus != 0) out: dev_warn(&h->pdev->dev, "error flushing cache on controller\n"); - cmd_special_free(h, c); -out_of_memory: + cmd_free(h, c); kfree(flush_buf); } +/* Make controller gather fresh report lun data each time we + * send down a report luns request + */ +static void hpsa_disable_rld_caching(struct ctlr_info *h) +{ + u32 *options; + struct CommandList *c; + int rc; + + /* Don't bother trying to set diag options if locked up */ + if (unlikely(h->lockup_detected)) + return; + + options = kzalloc(sizeof(*options), GFP_KERNEL); + if (!options) { + dev_err(&h->pdev->dev, + "Error: failed to disable rld caching, during alloc.\n"); + return; + } + + c = cmd_alloc(h); + + /* first, get the current diag options settings */ + if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) + goto errout; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if ((rc != 0) || (c->err_info->CommandStatus != 0)) + goto errout; + + /* Now, set the bit for disabling the RLD caching */ + *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING; + + if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) + goto errout; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_TODEVICE, NO_TIMEOUT); + if ((rc != 0) || (c->err_info->CommandStatus != 0)) + goto errout; + + /* Now verify that it got set: */ + if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, + RAID_CTLR_LUNID, TYPE_CMD)) + goto errout; + + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if ((rc != 0) || (c->err_info->CommandStatus != 0)) + goto errout; + + if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) + goto out; + +errout: + dev_err(&h->pdev->dev, + "Error: failed to disable report lun data caching.\n"); +out: + cmd_free(h, c); + kfree(options); +} + static void hpsa_shutdown(struct pci_dev *pdev) { struct ctlr_info *h; @@ -4970,49 +8763,77 @@ */ hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); - hpsa_free_irqs_and_disable_msix(h); + hpsa_free_irqs(h); /* init_one 4 */ + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } static void hpsa_free_device_info(struct ctlr_info *h) { int i; - for (i = 0; i < h->ndevices; i++) + for (i = 0; i < h->ndevices; i++) { kfree(h->dev[i]); + h->dev[i] = NULL; + } } static void hpsa_remove_one(struct pci_dev *pdev) { struct ctlr_info *h; + unsigned long flags; if (pci_get_drvdata(pdev) == NULL) { dev_err(&pdev->dev, "unable to remove device\n"); return; } h = pci_get_drvdata(pdev); - stop_controller_lockup_detector(h); - hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ + + /* Get rid of any controller monitoring work items */ + spin_lock_irqsave(&h->lock, flags); + h->remove_in_progress = 1; + spin_unlock_irqrestore(&h->lock, flags); + cancel_delayed_work_sync(&h->monitor_ctlr_work); + cancel_delayed_work_sync(&h->rescan_ctlr_work); + destroy_workqueue(h->rescan_ctlr_wq); + destroy_workqueue(h->resubmit_wq); + + /* + * Call before disabling interrupts. + * scsi_remove_host can trigger I/O operations especially + * when multipath is enabled. There can be SYNCHRONIZE CACHE + * operations which cannot complete and will hang the system. 
+ */ + if (h->scsi_host) + scsi_remove_host(h->scsi_host); /* init_one 8 */ + /* includes hpsa_free_irqs - init_one 4 */ + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ hpsa_shutdown(pdev); - iounmap(h->vaddr); - iounmap(h->transtable); - iounmap(h->cfgtable); - hpsa_free_device_info(h); - hpsa_free_sg_chain_blocks(h); - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, h->errinfo_pool_dhandle); - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); - kfree(h->cmd_pool_bits); - kfree(h->blockFetchTable); - kfree(h->hba_inquiry_data); - pci_disable_device(pdev); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - kfree(h); + + hpsa_free_device_info(h); /* scan */ + + kfree(h->hba_inquiry_data); /* init_one 10 */ + h->hba_inquiry_data = NULL; /* init_one 10 */ + hpsa_free_ioaccel2_sg_chain_blocks(h); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + kfree(h->lastlogicals); + + /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ + + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ + hpsa_free_pci_init(h); /* init_one 2.5 */ + + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ + + hpsa_delete_sas_host(h); + + kfree(h); /* init_one 1 */ } static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, @@ -5049,20 +8870,17 @@ * bits of the command address. */ static void calc_bucket_map(int bucket[], int num_buckets, - int nsgs, int *bucket_map) + int nsgs, int min_blocks, u32 *bucket_map) { int i, j, b, size; - /* even a command with 0 SGs requires 4 blocks */ -#define MINIMUM_TRANSFER_BLOCKS 4 -#define NUM_BUCKETS 8 /* Note, bucket_map must have nsgs+1 entries. */ for (i = 0; i <= nsgs; i++) { /* Compute size of a command with i SG entries */ - size = i + MINIMUM_TRANSFER_BLOCKS; + size = i + min_blocks; b = num_buckets; /* Assume the biggest bucket */ /* Find the bucket that is just big enough */ - for (j = 0; j < 8; j++) { + for (j = 0; j < num_buckets; j++) { if (bucket[j] >= size) { b = j; break; @@ -5073,10 +8891,20 @@ } } -static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) +/* + * return -ENODEV on err, 0 on success (or no action) + * allocates numerous items that must be freed later + */ +static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) { int i; unsigned long register_value; + unsigned long transMethod = CFGTBL_Trans_Performant | + (trans_support & CFGTBL_Trans_use_short_tags) | + CFGTBL_Trans_enable_directed_msix | + (trans_support & (CFGTBL_Trans_io_accel1 | + CFGTBL_Trans_io_accel2)); + struct access_method access = SA5_performant_access; /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different @@ -5096,6 +8924,16 @@ * sizes for small commands, and fewer sizes for larger commands. 
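 * As a worked example with the bft[] below and min_blocks == 4: a command
 * carrying 3 SG entries needs 3 + 4 = 7 blocks, and the smallest bucket
 * that fits is bft[2] == 8, so blockFetchTable[3] becomes 2. (A standalone,
 * compilable sketch of this mapping appears after module_init at the end
 * of this hunk.)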
*/ int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; +#define MIN_IOACCEL2_BFT_ENTRY 5 +#define HPSA_IOACCEL2_HEADER_SZ 4 + int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, + HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; + BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); + BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > + 16 * MIN_IOACCEL2_BFT_ENTRY); + BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); /* 5 = 1 s/g entry or 4k * 6 = 2 s/g entry or 8k @@ -5103,12 +8941,20 @@ * 10 = 6 s/g entry or 24k */ + /* If the controller supports either ioaccel method then + * we can also use the RAID stack submit path that does not + * perform the superfluous readl() after each command submission. + */ + if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) + access = SA5_performant_access_no_read; + /* Controller spec: zero out this buffer. */ - memset(h->reply_pool, 0, h->reply_pool_size); + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, 0, h->reply_queue_size); bft[7] = SG_ENTRIES_IN_CMD + 4; calc_bucket_map(bft, ARRAY_SIZE(bft), - SG_ENTRIES_IN_CMD, h->blockFetchTable); + SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); for (i = 0; i < 8; i++) writel(bft[i], &h->transtable->BlockFetch[i]); @@ -5120,48 +8966,281 @@ for (i = 0; i < h->nreply_queues; i++) { writel(0, &h->transtable->RepQAddr[i].upper); - writel(h->reply_pool_dhandle + - (h->max_commands * sizeof(u64) * i), + writel(h->reply_queue[i].busaddr, &h->transtable->RepQAddr[i].lower); } - writel(CFGTBL_Trans_Performant | use_short_tags | - CFGTBL_Trans_enable_directed_msix, - &(h->cfgtable->HostWrite.TransportRequest)); + writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); + writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); + /* + * enable outbound interrupt coalescing in accelerator mode; + */ + if (trans_support & CFGTBL_Trans_io_accel1) { + access = SA5_ioaccel_mode1_access; + writel(10, &h->cfgtable->HostWrite.CoalIntDelay); + writel(4, &h->cfgtable->HostWrite.CoalIntCount); + } else { + if (trans_support & CFGTBL_Trans_io_accel2) { + access = SA5_ioaccel_mode2_access; + writel(10, &h->cfgtable->HostWrite.CoalIntDelay); + writel(4, &h->cfgtable->HostWrite.CoalIntCount); + } + } writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - hpsa_wait_for_mode_change_ack(h); + if (hpsa_wait_for_mode_change_ack(h)) { + dev_err(&h->pdev->dev, + "performant mode problem - doorbell timeout\n"); + return -ENODEV; + } register_value = readl(&(h->cfgtable->TransportActive)); if (!(register_value & CFGTBL_Trans_Performant)) { - dev_warn(&h->pdev->dev, "unable to get board into" - " performant mode\n"); - return; + dev_err(&h->pdev->dev, + "performant mode problem - transport not active\n"); + return -ENODEV; } /* Change the access methods to the performant access methods */ - h->access = SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; + h->access = access; + h->transMethod = transMethod; + + if (!((trans_support & CFGTBL_Trans_io_accel1) || + (trans_support & CFGTBL_Trans_io_accel2))) + return 0; + + if (trans_support & CFGTBL_Trans_io_accel1) { + /* Set up I/O accelerator mode */ + for (i = 0; i < h->nreply_queues; i++) { + writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); + h->reply_queue[i].current_entry = + readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); + } + bft[7] = h->ioaccel_maxsg + 8; + calc_bucket_map(bft, ARRAY_SIZE(bft), 
h->ioaccel_maxsg, 8, + h->ioaccel1_blockFetchTable); + + /* initialize all reply queue entries to unused */ + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, + (u8) IOACCEL_MODE1_REPLY_UNUSED, + h->reply_queue_size); + + /* set all the constant fields in the accelerator command + * frames once at init time to save CPU cycles later. + */ + for (i = 0; i < h->nr_cmds; i++) { + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; + + cp->function = IOACCEL1_FUNCTION_SCSIIO; + cp->err_info = (u32) (h->errinfo_pool_dhandle + + (i * sizeof(struct ErrorInfo))); + cp->err_info_len = sizeof(struct ErrorInfo); + cp->sgl_offset = IOACCEL1_SGLOFFSET; + cp->host_context_flags = + cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); + cp->timeout_sec = 0; + cp->ReplyQueue = 0; + cp->tag = + cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); + cp->host_addr = + cpu_to_le64(h->ioaccel_cmd_pool_dhandle + + (i * sizeof(struct io_accel1_cmd))); + } + } else if (trans_support & CFGTBL_Trans_io_accel2) { + u64 cfg_offset, cfg_base_addr_index; + u32 bft2_offset, cfg_base_addr; + int rc; + + rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, + &cfg_base_addr_index, &cfg_offset); + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); + bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; + calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, + 4, h->ioaccel2_blockFetchTable); + bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); + BUILD_BUG_ON(offsetof(struct CfgTable, + io_accel_request_size_offset) != 0xb8); + h->ioaccel2_bft2_regs = + remap_pci_mem(pci_resource_start(h->pdev, + cfg_base_addr_index) + + cfg_offset + bft2_offset, + ARRAY_SIZE(bft2) * + sizeof(*h->ioaccel2_bft2_regs)); + for (i = 0; i < ARRAY_SIZE(bft2); i++) + writel(bft2[i], &h->ioaccel2_bft2_regs[i]); + } + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + if (hpsa_wait_for_mode_change_ack(h)) { + dev_err(&h->pdev->dev, + "performant mode problem - enabling ioaccel mode\n"); + return -ENODEV; + } + return 0; +} + +/* Free ioaccel1 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) +{ + if (h->ioaccel_cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + h->ioaccel_cmd_pool, + h->ioaccel_cmd_pool_dhandle); + h->ioaccel_cmd_pool = NULL; + h->ioaccel_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel1_blockFetchTable); + h->ioaccel1_blockFetchTable = NULL; +} + +/* Allocate ioaccel1 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) +{ + h->ioaccel_maxsg = + readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); + if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) + h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; + + /* Command structures must be aligned on a 128-byte boundary + * because the 7 lower bits of the address are used by the + * hardware. 
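+ * (128 == 1 << 7; this mirrors the 32-byte alignment of struct
+ * CommandList, where the low 5 bits of the address are claimed.)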
+ */ + BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % + IOACCEL1_COMMANDLIST_ALIGNMENT); + h->ioaccel_cmd_pool = + pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + &(h->ioaccel_cmd_pool_dhandle)); + + h->ioaccel1_blockFetchTable = + kmalloc(((h->ioaccel_maxsg + 1) * + sizeof(u32)), GFP_KERNEL); + + if ((h->ioaccel_cmd_pool == NULL) || + (h->ioaccel1_blockFetchTable == NULL)) + goto clean_up; + + memset(h->ioaccel_cmd_pool, 0, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); + return 0; + +clean_up: + hpsa_free_ioaccel1_cmd_and_bft(h); + return -ENOMEM; +} + +/* Free ioaccel2 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) +{ + hpsa_free_ioaccel2_sg_chain_blocks(h); + + if (h->ioaccel2_cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + h->ioaccel2_cmd_pool, + h->ioaccel2_cmd_pool_dhandle); + h->ioaccel2_cmd_pool = NULL; + h->ioaccel2_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel2_blockFetchTable); + h->ioaccel2_blockFetchTable = NULL; } -static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) +/* Allocate ioaccel2 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) +{ + int rc; + + /* Allocate ioaccel2 mode command blocks and block fetch table */ + + h->ioaccel_maxsg = + readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); + if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) + h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; + + BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % + IOACCEL2_COMMANDLIST_ALIGNMENT); + h->ioaccel2_cmd_pool = + pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + &(h->ioaccel2_cmd_pool_dhandle)); + + h->ioaccel2_blockFetchTable = + kmalloc(((h->ioaccel_maxsg + 1) * + sizeof(u32)), GFP_KERNEL); + + if ((h->ioaccel2_cmd_pool == NULL) || + (h->ioaccel2_blockFetchTable == NULL)) { + rc = -ENOMEM; + goto clean_up; + } + + rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); + if (rc) + goto clean_up; + + memset(h->ioaccel2_cmd_pool, 0, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); + return 0; + +clean_up: + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; +} + +/* Free items allocated by hpsa_put_ctlr_into_performant_mode */ +static void hpsa_free_performant_mode(struct ctlr_info *h) +{ + kfree(h->blockFetchTable); + h->blockFetchTable = NULL; + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); +} + +/* return -ENODEV on error, 0 on success (or no action) + * allocates numerous items that must be freed later + */ +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; - int i; + unsigned long transMethod = CFGTBL_Trans_Performant | + CFGTBL_Trans_use_short_tags; + int i, rc; if (hpsa_simple_mode) - return; + return 0; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) - return; + return 0; - h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1; + /* Check for I/O accelerator mode support */ + if (trans_support & CFGTBL_Trans_io_accel1) { + transMethod |= CFGTBL_Trans_io_accel1 | + CFGTBL_Trans_enable_directed_msix; + rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); + if (rc) + return rc; + } else if (trans_support & CFGTBL_Trans_io_accel2) { + transMethod |= CFGTBL_Trans_io_accel2 | + CFGTBL_Trans_enable_directed_msix; + rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); + if (rc) + return rc; + } + + h->nreply_queues = h->msix_vector > 0 ? 
h->msix_vector : 1; hpsa_get_max_perf_mode_cmds(h); /* Performant mode ring buffer and supporting data structures */ - h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; - h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, - &(h->reply_pool_dhandle)); + h->reply_queue_size = h->max_commands * sizeof(u64); for (i = 0; i < h->nreply_queues; i++) { - h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; + h->reply_queue[i].head = pci_alloc_consistent(h->pdev, + h->reply_queue_size, + &(h->reply_queue[i].busaddr)); + if (!h->reply_queue[i].head) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } h->reply_queue[i].size = h->max_commands; h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h->reply_queue[i].current_entry = 0; @@ -5170,35 +9249,493 @@ /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * sizeof(u32)), GFP_KERNEL); + if (!h->blockFetchTable) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } - if ((h->reply_pool == NULL) - || (h->blockFetchTable == NULL)) - goto clean_up; + rc = hpsa_enter_performant_mode(h, trans_support); + if (rc) + goto clean2; /* bft, rq, ioaccel */ + return 0; - hpsa_enter_performant_mode(h, - trans_support & CFGTBL_Trans_use_short_tags); +clean2: /* bft, rq, ioaccel */ + kfree(h->blockFetchTable); + h->blockFetchTable = NULL; +clean1: /* rq, ioaccel */ + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; +} - return; +static int is_accelerated_cmd(struct CommandList *c) +{ + return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; +} -clean_up: - if (h->reply_pool) - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); - kfree(h->blockFetchTable); +static void hpsa_drain_accel_commands(struct ctlr_info *h) +{ + struct CommandList *c = NULL; + int i, accel_cmds_out; + int refcount; + + do { /* wait for all outstanding ioaccel commands to drain out */ + accel_cmds_out = 0; + for (i = 0; i < h->nr_cmds; i++) { + c = h->cmd_pool + i; + refcount = atomic_inc_return(&c->refcount); + if (refcount > 1) /* Command is allocated */ + accel_cmds_out += is_accelerated_cmd(c); + cmd_free(h, c); + } + if (accel_cmds_out <= 0) + break; + msleep(100); + } while (1); +} + +static struct hpsa_sas_phy *hpsa_alloc_sas_phy( + struct hpsa_sas_port *hpsa_sas_port) +{ + struct hpsa_sas_phy *hpsa_sas_phy; + struct sas_phy *phy; + + hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL); + if (!hpsa_sas_phy) + return NULL; + + phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev, + hpsa_sas_port->next_phy_index); + if (!phy) { + kfree(hpsa_sas_phy); + return NULL; + } + + hpsa_sas_port->next_phy_index++; + hpsa_sas_phy->phy = phy; + hpsa_sas_phy->parent_port = hpsa_sas_port; + + return hpsa_sas_phy; +} + +static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) +{ + struct sas_phy *phy = hpsa_sas_phy->phy; + + sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); + sas_phy_free(phy); + if (hpsa_sas_phy->added_to_port) + list_del(&hpsa_sas_phy->phy_list_entry); + kfree(hpsa_sas_phy); +} + +static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy) +{ + int rc; + struct hpsa_sas_port *hpsa_sas_port; + struct sas_phy *phy; + struct sas_identify *identify; + + hpsa_sas_port = hpsa_sas_phy->parent_port; + phy = hpsa_sas_phy->phy; + + identify = &phy->identify; + memset(identify, 0, sizeof(*identify)); + identify->sas_address = 
hpsa_sas_port->sas_address; + identify->device_type = SAS_END_DEVICE; + identify->initiator_port_protocols = SAS_PROTOCOL_STP; + identify->target_port_protocols = SAS_PROTOCOL_STP; + phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + + rc = sas_phy_add(hpsa_sas_phy->phy); + if (rc) + return rc; + + sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy); + list_add_tail(&hpsa_sas_phy->phy_list_entry, + &hpsa_sas_port->phy_list_head); + hpsa_sas_phy->added_to_port = true; + + return 0; +} + +static int + hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port, + struct sas_rphy *rphy) +{ + struct sas_identify *identify; + + identify = &rphy->identify; + identify->sas_address = hpsa_sas_port->sas_address; + identify->initiator_port_protocols = SAS_PROTOCOL_STP; + identify->target_port_protocols = SAS_PROTOCOL_STP; + + return sas_rphy_add(rphy); +} + +static struct hpsa_sas_port + *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node, + u64 sas_address) +{ + int rc; + struct hpsa_sas_port *hpsa_sas_port; + struct sas_port *port; + + hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL); + if (!hpsa_sas_port) + return NULL; + + INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head); + hpsa_sas_port->parent_node = hpsa_sas_node; + + port = sas_port_alloc_num(hpsa_sas_node->parent_dev); + if (!port) + goto free_hpsa_port; + + rc = sas_port_add(port); + if (rc) + goto free_sas_port; + + hpsa_sas_port->port = port; + hpsa_sas_port->sas_address = sas_address; + list_add_tail(&hpsa_sas_port->port_list_entry, + &hpsa_sas_node->port_list_head); + + return hpsa_sas_port; + +free_sas_port: + sas_port_free(port); +free_hpsa_port: + kfree(hpsa_sas_port); + + return NULL; +} + +static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port) +{ + struct hpsa_sas_phy *hpsa_sas_phy; + struct hpsa_sas_phy *next; + + list_for_each_entry_safe(hpsa_sas_phy, next, + &hpsa_sas_port->phy_list_head, phy_list_entry) + hpsa_free_sas_phy(hpsa_sas_phy); + + sas_port_delete(hpsa_sas_port->port); + list_del(&hpsa_sas_port->port_list_entry); + kfree(hpsa_sas_port); +} + +static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev) +{ + struct hpsa_sas_node *hpsa_sas_node; + + hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL); + if (hpsa_sas_node) { + hpsa_sas_node->parent_dev = parent_dev; + INIT_LIST_HEAD(&hpsa_sas_node->port_list_head); + } + + return hpsa_sas_node; +} + +static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node) +{ + struct hpsa_sas_port *hpsa_sas_port; + struct hpsa_sas_port *next; + + if (!hpsa_sas_node) + return; + + list_for_each_entry_safe(hpsa_sas_port, next, + &hpsa_sas_node->port_list_head, port_list_entry) + hpsa_free_sas_port(hpsa_sas_port); + + kfree(hpsa_sas_node); +} + +static struct hpsa_scsi_dev_t + *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, + struct sas_rphy *rphy) +{ + int i; + struct hpsa_scsi_dev_t *device; + + for (i = 0; i < h->ndevices; i++) { + device = h->dev[i]; + if (!device->sas_port) + continue; + if (device->sas_port->rphy == rphy) + return device; + } + + return NULL; +} + +static int hpsa_add_sas_host(struct ctlr_info *h) +{ + int rc; + struct device *parent_dev; + struct hpsa_sas_node *hpsa_sas_node; + struct hpsa_sas_port *hpsa_sas_port; + struct hpsa_sas_phy *hpsa_sas_phy; + + parent_dev = &h->scsi_host->shost_gendev; + + 
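+	/*
+	 * Build the exported topology bottom-up: a root node parented to
+	 * the SCSI host, one port carrying the controller's SAS address,
+	 * and a single phy attached to that port. Each error path below
+	 * unwinds exactly the levels that were already built.
+	 */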
hpsa_sas_node = hpsa_alloc_sas_node(parent_dev); + if (!hpsa_sas_node) + return -ENOMEM; + + hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address); + if (!hpsa_sas_port) { + rc = -ENODEV; + goto free_sas_node; + } + + hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port); + if (!hpsa_sas_phy) { + rc = -ENODEV; + goto free_sas_port; + } + + rc = hpsa_sas_port_add_phy(hpsa_sas_phy); + if (rc) + goto free_sas_phy; + + h->sas_host = hpsa_sas_node; + + return 0; + +free_sas_phy: + hpsa_free_sas_phy(hpsa_sas_phy); +free_sas_port: + hpsa_free_sas_port(hpsa_sas_port); +free_sas_node: + hpsa_free_sas_node(hpsa_sas_node); + + return rc; +} + +static void hpsa_delete_sas_host(struct ctlr_info *h) +{ + hpsa_free_sas_node(h->sas_host); } +static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, + struct hpsa_scsi_dev_t *device) +{ + int rc; + struct hpsa_sas_port *hpsa_sas_port; + struct sas_rphy *rphy; + + hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address); + if (!hpsa_sas_port) + return -ENOMEM; + + rphy = sas_end_device_alloc(hpsa_sas_port->port); + if (!rphy) { + rc = -ENODEV; + goto free_sas_port; + } + + hpsa_sas_port->rphy = rphy; + device->sas_port = hpsa_sas_port; + + rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); + if (rc) + goto free_sas_port; + + return 0; + +free_sas_port: + hpsa_free_sas_port(hpsa_sas_port); + device->sas_port = NULL; + + return rc; +} + +static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device) +{ + if (device->sas_port) { + hpsa_free_sas_port(device->sas_port); + device->sas_port = NULL; + } +} + +static int +hpsa_sas_get_linkerrors(struct sas_phy *phy) +{ + return 0; +} + +static int +hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +{ + return 0; +} + +static int +hpsa_sas_get_bay_identifier(struct sas_rphy *rphy) +{ + return -ENXIO; +} + +static int +hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + return 0; +} + +static int +hpsa_sas_phy_enable(struct sas_phy *phy, int enable) +{ + return 0; +} + +static int +hpsa_sas_phy_setup(struct sas_phy *phy) +{ + return 0; +} + +static void +hpsa_sas_phy_release(struct sas_phy *phy) +{ +} + +static int +hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + return -EINVAL; +} + +/* SMP = Serial Management Protocol */ +static int +hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, +struct request *req) +{ + return -EINVAL; +} + +static struct sas_function_template hpsa_sas_transport_functions = { + .get_linkerrors = hpsa_sas_get_linkerrors, + .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier, + .get_bay_identifier = hpsa_sas_get_bay_identifier, + .phy_reset = hpsa_sas_phy_reset, + .phy_enable = hpsa_sas_phy_enable, + .phy_setup = hpsa_sas_phy_setup, + .phy_release = hpsa_sas_phy_release, + .set_phy_speed = hpsa_sas_phy_speed, + .smp_handler = hpsa_sas_smp_handler, +}; + /* * This is it. Register the PCI driver information for the cards we control * the OS will call our registered routines when it finds one of our cards. 
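 * The SAS transport template must be attached before the PCI driver is
 * registered, since probe can add SAS hosts immediately; if registration
 * fails, the template is released again.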
*/ static int __init hpsa_init(void) { - return pci_register_driver(&hpsa_pci_driver); + int rc; + + hpsa_sas_transport_template = + sas_attach_transport(&hpsa_sas_transport_functions); + if (!hpsa_sas_transport_template) + return -ENODEV; + + rc = pci_register_driver(&hpsa_pci_driver); + + if (rc) + sas_release_transport(hpsa_sas_transport_template); + + return rc; } static void __exit hpsa_cleanup(void) { pci_unregister_driver(&hpsa_pci_driver); + sas_release_transport(hpsa_sas_transport_template); +} + +static void __attribute__((unused)) verify_offsets(void) +{ +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) + + VERIFY_OFFSET(structure_size, 0); + VERIFY_OFFSET(volume_blk_size, 4); + VERIFY_OFFSET(volume_blk_cnt, 8); + VERIFY_OFFSET(phys_blk_shift, 16); + VERIFY_OFFSET(parity_rotation_shift, 17); + VERIFY_OFFSET(strip_size, 18); + VERIFY_OFFSET(disk_starting_blk, 20); + VERIFY_OFFSET(disk_blk_cnt, 28); + VERIFY_OFFSET(data_disks_per_row, 36); + VERIFY_OFFSET(metadata_disks_per_row, 38); + VERIFY_OFFSET(row_cnt, 40); + VERIFY_OFFSET(layout_map_count, 42); + VERIFY_OFFSET(flags, 44); + VERIFY_OFFSET(dekindex, 46); + /* VERIFY_OFFSET(reserved, 48 */ + VERIFY_OFFSET(data, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) + + VERIFY_OFFSET(IU_type, 0); + VERIFY_OFFSET(direction, 1); + VERIFY_OFFSET(reply_queue, 2); + /* VERIFY_OFFSET(reserved1, 3); */ + VERIFY_OFFSET(scsi_nexus, 4); + VERIFY_OFFSET(Tag, 8); + VERIFY_OFFSET(cdb, 16); + VERIFY_OFFSET(cciss_lun, 32); + VERIFY_OFFSET(data_len, 40); + VERIFY_OFFSET(cmd_priority_task_attr, 44); + VERIFY_OFFSET(sg_count, 45); + /* VERIFY_OFFSET(reserved3 */ + VERIFY_OFFSET(err_ptr, 48); + VERIFY_OFFSET(err_len, 56); + /* VERIFY_OFFSET(reserved4 */ + VERIFY_OFFSET(sg, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) + + VERIFY_OFFSET(dev_handle, 0x00); + VERIFY_OFFSET(reserved1, 0x02); + VERIFY_OFFSET(function, 0x03); + VERIFY_OFFSET(reserved2, 0x04); + VERIFY_OFFSET(err_info, 0x0C); + VERIFY_OFFSET(reserved3, 0x10); + VERIFY_OFFSET(err_info_len, 0x12); + VERIFY_OFFSET(reserved4, 0x13); + VERIFY_OFFSET(sgl_offset, 0x14); + VERIFY_OFFSET(reserved5, 0x15); + VERIFY_OFFSET(transfer_len, 0x1C); + VERIFY_OFFSET(reserved6, 0x20); + VERIFY_OFFSET(io_flags, 0x24); + VERIFY_OFFSET(reserved7, 0x26); + VERIFY_OFFSET(LUN, 0x34); + VERIFY_OFFSET(control, 0x3C); + VERIFY_OFFSET(CDB, 0x40); + VERIFY_OFFSET(reserved8, 0x50); + VERIFY_OFFSET(host_context_flags, 0x60); + VERIFY_OFFSET(timeout_sec, 0x62); + VERIFY_OFFSET(ReplyQueue, 0x64); + VERIFY_OFFSET(reserved9, 0x65); + VERIFY_OFFSET(tag, 0x68); + VERIFY_OFFSET(host_addr, 0x70); + VERIFY_OFFSET(CISS_LUN, 0x78); + VERIFY_OFFSET(SG, 0x78 + 8); +#undef VERIFY_OFFSET } module_init(hpsa_init);
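
/*
 * [Editor's sketch, not part of the patch] The block-fetch-table logic is
 * self-contained enough to exercise in userspace. Below is a minimal
 * re-statement of calc_bucket_map() under the same bft[] values the
 * performant-mode setup uses; the SG_ENTRIES_IN_CMD value and the main()
 * harness are assumptions for illustration only, and u32 is rendered as
 * unsigned int so the sketch builds outside the kernel tree.
 */
#include <stdio.h>

#define SG_ENTRIES_IN_CMD 32	/* assumed here; defined in hpsa_cmd.h */

/* Map each possible SG-entry count to the smallest bucket that fits it. */
static void calc_bucket_map(const int bucket[], int num_buckets,
			    int nsgs, int min_blocks, unsigned int *bucket_map)
{
	int i, j, b, size;

	/* Note: bucket_map must have nsgs + 1 entries. */
	for (i = 0; i <= nsgs; i++) {
		size = i + min_blocks;	/* blocks needed for i SG entries */
		b = num_buckets;	/* assume the biggest bucket */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;	/* first bucket big enough */
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
	unsigned int map[SG_ENTRIES_IN_CMD + 1];
	int i;

	calc_bucket_map(bft, 8, SG_ENTRIES_IN_CMD, 4, map);
	for (i = 0; i <= SG_ENTRIES_IN_CMD; i++)
		printf("%2d SG entries -> bucket %u (%d blocks)\n",
		       i, map[i], bft[map[i]]);
	return 0;
}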