--- zzzz-none-000/linux-3.10.107/drivers/target/target_core_configfs.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/target/target_core_configfs.c 2021-02-04 17:41:59.000000000 +0000 @@ -3,7 +3,7 @@ * * This file contains ConfigFS logic for the Generic Target Engine project. * - * (c) Copyright 2008-2012 RisingTide Systems LLC. + * (c) Copyright 2008-2013 Datera, Inc. * * Nicholas A. Bellinger * @@ -40,26 +40,42 @@ #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> -#include <target/target_core_fabric_configfs.h> -#include <target/target_core_configfs.h> -#include <target/configfs_macros.h> #include "target_core_internal.h" #include "target_core_alua.h" #include "target_core_pr.h" #include "target_core_rd.h" +#include "target_core_xcopy.h" + +#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ +static void target_core_setup_##_name##_cit(struct target_backend *tb) \ +{ \ + struct config_item_type *cit = &tb->tb_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = _attrs; \ + cit->ct_owner = tb->ops->owner; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} + +#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ +static void target_core_setup_##_name##_cit(struct target_backend *tb) \ +{ \ + struct config_item_type *cit = &tb->tb_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = tb->ops->tb_##_name##_attrs; \ + cit->ct_owner = tb->ops->owner; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} extern struct t10_alua_lu_gp *default_lu_gp; static LIST_HEAD(g_tf_list); static DEFINE_MUTEX(g_tf_lock); -struct target_core_configfs_attribute { - struct configfs_attribute attr; - ssize_t (*show)(void *, char *); - ssize_t (*store)(void *, const char *, size_t); -}; - static struct config_group target_core_hbagroup; static struct config_group alua_group; static struct config_group alua_lu_gps_group; @@ -73,24 +89,15 @@ /* * Attributes for /sys/kernel/config/target/ */ -static ssize_t target_core_attr_show(struct config_item *item, - struct configfs_attribute *attr, - char *page) +static ssize_t target_core_item_version_show(struct config_item *item, + char *page) { return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" - " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION, + " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); } -static struct configfs_item_operations target_core_fabric_item_ops = { - .show_attribute = target_core_attr_show, -}; - -static struct configfs_attribute target_core_item_attr_version = { - .ca_owner = THIS_MODULE, - .ca_name = "version", - .ca_mode = S_IRUGO, -}; +CONFIGFS_ATTR_RO(target_core_item_, version); static struct target_fabric_configfs *target_core_get_fabric( const char *name) @@ -102,7 +109,7 @@ mutex_lock(&g_tf_lock); list_for_each_entry(tf, &g_tf_list, tf_list) { - if (!strcmp(tf->tf_name, name)) { + if (!strcmp(tf->tf_ops->name, name)) { atomic_inc(&tf->tf_access_cnt); mutex_unlock(&g_tf_lock); return tf; @@ -125,78 +132,78 @@ pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); - /* - * Below are some hardcoded request_module() calls to automatically - * load fabric modules when the following is called: - * - * mkdir -p /sys/kernel/config/target/$MODULE_NAME - * - * Note that this does not limit which TCM fabric module can be - * registered, but simply provides auto loading logic for modules with - * mkdir(2) system calls with known TCM fabric modules.
- */ - if (!strncmp(name, "iscsi", 5)) { + + tf = target_core_get_fabric(name); + if (!tf) { + pr_debug("target_core_register_fabric() trying autoload for %s\n", + name); + /* - * Automatically load the LIO Target fabric module when the - * following is called: + * Below are some hardcoded request_module() calls to automatically + * load fabric modules when the following is called: * - * mkdir -p $CONFIGFS/target/iscsi - */ - ret = request_module("iscsi_target_mod"); - if (ret < 0) { - pr_err("request_module() failed for" - " iscsi_target_mod.ko: %d\n", ret); - return ERR_PTR(-EINVAL); - } - } else if (!strncmp(name, "loopback", 8)) { - /* - * Automatically load the tcm_loop fabric module when the - * following is called: + * mkdir -p /sys/kernel/config/target/$MODULE_NAME * - * mkdir -p $CONFIGFS/target/loopback + * Note that this does not limit which TCM fabric module can be + * registered, but simply provides auto loading logic for modules with + * mkdir(2) system calls with known TCM fabric modules. */ - ret = request_module("tcm_loop"); - if (ret < 0) { - pr_err("request_module() failed for" - " tcm_loop.ko: %d\n", ret); - return ERR_PTR(-EINVAL); + + if (!strncmp(name, "iscsi", 5)) { + /* + * Automatically load the LIO Target fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/iscsi + */ + ret = request_module("iscsi_target_mod"); + if (ret < 0) { + pr_debug("request_module() failed for" + " iscsi_target_mod.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } + } else if (!strncmp(name, "loopback", 8)) { + /* + * Automatically load the tcm_loop fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/loopback + */ + ret = request_module("tcm_loop"); + if (ret < 0) { + pr_debug("request_module() failed for" + " tcm_loop.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } } + + tf = target_core_get_fabric(name); } - tf = target_core_get_fabric(name); if (!tf) { - pr_err("target_core_get_fabric() failed for %s\n", - name); + pr_debug("target_core_get_fabric() failed for %s\n", + name); return ERR_PTR(-EINVAL); } pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" - " %s\n", tf->tf_name); + " %s\n", tf->tf_ops->name); /* * On a successful target_core_get_fabric() lookup, the returned * struct target_fabric_configfs *tf will contain a usage reference.
*/ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; tf->tf_group.default_groups[0] = &tf->tf_disc_group; tf->tf_group.default_groups[1] = NULL; - config_group_init_type_name(&tf->tf_group, name, - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit); config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", - &TF_CIT_TMPL(tf)->tfc_discovery_cit); + &tf->tf_discovery_cit); pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); - /* - * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() - */ - tf->tf_ops.tf_subsys = tf->tf_subsys; - tf->tf_fabric = &tf->tf_group.cg_item; - pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" - " for %s\n", name); - return &tf->tf_group; } @@ -217,13 +224,9 @@ " tf list\n", config_item_name(item)); pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" - " %s\n", tf->tf_name); + " %s\n", tf->tf_ops->name); atomic_dec(&tf->tf_access_cnt); - pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" - " tf->tf_fabric for %s\n", tf->tf_name); - tf->tf_fabric = NULL; - pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" " %s\n", config_item_name(item)); @@ -253,7 +256,6 @@ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/ */ static struct config_item_type target_core_fabrics_item = { - .ct_item_ops = &target_core_fabric_item_ops, .ct_group_ops = &target_core_fabric_group_ops, .ct_attrs = target_core_fabric_item_attrs, .ct_owner = THIS_MODULE, @@ -268,98 +270,37 @@ }, }; -static struct configfs_subsystem *target_core_subsystem[] = { - &target_core_fabrics, - NULL, -}; +int target_depend_item(struct config_item *item) +{ + return configfs_depend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_depend_item); + +void target_undepend_item(struct config_item *item) +{ + return configfs_undepend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_undepend_item); /*############################################################################## // Start functions called by external Target Fabrics Modules //############################################################################*/ -/* - * First function called by fabric modules to: - * - * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. - * 2) Add struct target_fabric_configfs to g_tf_list - * 3) Return struct target_fabric_configfs to fabric module to be passed - * into target_fabric_configfs_register(). 
- */ -struct target_fabric_configfs *target_fabric_configfs_init( - struct module *fabric_mod, - const char *name) +static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) { - struct target_fabric_configfs *tf; - - if (!(name)) { - pr_err("Unable to locate passed fabric name\n"); - return ERR_PTR(-EINVAL); + if (!tfo->name) { + pr_err("Missing tfo->name\n"); + return -EINVAL; } - if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { + if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) { pr_err("Passed name: %s exceeds TARGET_FABRIC" - "_NAME_SIZE\n", name); - return ERR_PTR(-EINVAL); + "_NAME_SIZE\n", tfo->name); + return -EINVAL; } - - tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); - if (!tf) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&tf->tf_list); - atomic_set(&tf->tf_access_cnt, 0); - /* - * Setup the default generic struct config_item_type's (cits) in - * struct target_fabric_configfs->tf_cit_tmpl - */ - tf->tf_module = fabric_mod; - target_fabric_setup_cits(tf); - - tf->tf_subsys = target_core_subsystem[0]; - snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); - - mutex_lock(&g_tf_lock); - list_add_tail(&tf->tf_list, &g_tf_list); - mutex_unlock(&g_tf_lock); - - pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" - ">>>>>>>>>>>>>>\n"); - pr_debug("Initialized struct target_fabric_configfs: %p for" - " %s\n", tf, tf->tf_name); - return tf; -} -EXPORT_SYMBOL(target_fabric_configfs_init); - -/* - * Called by fabric plugins after FAILED target_fabric_configfs_register() call. - */ -void target_fabric_configfs_free( - struct target_fabric_configfs *tf) -{ - mutex_lock(&g_tf_lock); - list_del(&tf->tf_list); - mutex_unlock(&g_tf_lock); - - kfree(tf); -} -EXPORT_SYMBOL(target_fabric_configfs_free); - -/* - * Perform a sanity check of the passed tf->tf_ops before completing - * TCM fabric module registration. 
- */ -static int target_fabric_tf_ops_check( - struct target_fabric_configfs *tf) -{ - struct target_core_fabric_ops *tfo = &tf->tf_ops; - if (!tfo->get_fabric_name) { pr_err("Missing tfo->get_fabric_name()\n"); return -EINVAL; } - if (!tfo->get_fabric_proto_ident) { - pr_err("Missing tfo->get_fabric_proto_ident()\n"); - return -EINVAL; - } if (!tfo->tpg_get_wwn) { pr_err("Missing tfo->tpg_get_wwn()\n"); return -EINVAL; @@ -368,18 +309,6 @@ pr_err("Missing tfo->tpg_get_tag()\n"); return -EINVAL; } - if (!tfo->tpg_get_default_depth) { - pr_err("Missing tfo->tpg_get_default_depth()\n"); - return -EINVAL; - } - if (!tfo->tpg_get_pr_transport_id) { - pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); - return -EINVAL; - } - if (!tfo->tpg_get_pr_transport_id_len) { - pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); - return -EINVAL; - } if (!tfo->tpg_check_demo_mode) { pr_err("Missing tfo->tpg_check_demo_mode()\n"); return -EINVAL; @@ -396,14 +325,6 @@ pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); return -EINVAL; } - if (!tfo->tpg_alloc_fabric_acl) { - pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); - return -EINVAL; - } - if (!tfo->tpg_release_fabric_acl) { - pr_err("Missing tfo->tpg_release_fabric_acl()\n"); - return -EINVAL; - } if (!tfo->tpg_get_inst_index) { pr_err("Missing tfo->tpg_get_inst_index()\n"); return -EINVAL; @@ -436,10 +357,6 @@ pr_err("Missing tfo->set_default_node_attributes()\n"); return -EINVAL; } - if (!tfo->get_task_tag) { - pr_err("Missing tfo->get_task_tag()\n"); - return -EINVAL; - } if (!tfo->get_cmd_state) { pr_err("Missing tfo->get_cmd_state()\n"); return -EINVAL; @@ -456,6 +373,10 @@ pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } + if (!tfo->aborted_task) { + pr_err("Missing tfo->aborted_task()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in @@ -481,282 +402,682 @@ return 0; } -/* - * Called 2nd from fabric module with returned parameter of - * struct target_fabric_configfs * from target_fabric_configfs_init(). - * - * Upon a successful registration, the new fabric's struct config_item is - * return. Also, a pointer to this struct is set in the passed - * struct target_fabric_configfs. 
- */ -int target_fabric_configfs_register( - struct target_fabric_configfs *tf) +int target_register_template(const struct target_core_fabric_ops *fo) { + struct target_fabric_configfs *tf; int ret; + ret = target_fabric_tf_ops_check(fo); + if (ret) + return ret; + + tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); if (!tf) { - pr_err("Unable to locate target_fabric_configfs" - " pointer\n"); - return -EINVAL; - } - if (!tf->tf_subsys) { - pr_err("Unable to target struct config_subsystem" - " pointer\n"); - return -EINVAL; + pr_err("%s: could not allocate memory!\n", __func__); + return -ENOMEM; } - ret = target_fabric_tf_ops_check(tf); - if (ret < 0) - return ret; - pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" - ">>>>>>>>>>\n"); + INIT_LIST_HEAD(&tf->tf_list); + atomic_set(&tf->tf_access_cnt, 0); + tf->tf_ops = fo; + target_fabric_setup_cits(tf); + + mutex_lock(&g_tf_lock); + list_add_tail(&tf->tf_list, &g_tf_list); + mutex_unlock(&g_tf_lock); + return 0; } -EXPORT_SYMBOL(target_fabric_configfs_register); +EXPORT_SYMBOL(target_register_template); -void target_fabric_configfs_deregister( - struct target_fabric_configfs *tf) +void target_unregister_template(const struct target_core_fabric_ops *fo) { - struct configfs_subsystem *su; + struct target_fabric_configfs *t; - if (!tf) { - pr_err("Unable to locate passed target_fabric_" - "configfs\n"); - return; - } - su = tf->tf_subsys; - if (!su) { - pr_err("Unable to locate passed tf->tf_subsys" - " pointer\n"); - return; - } - pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" - ">>>>>>>>>>>>\n"); mutex_lock(&g_tf_lock); - if (atomic_read(&tf->tf_access_cnt)) { - mutex_unlock(&g_tf_lock); - pr_err("Non zero tf->tf_access_cnt for fabric %s\n", - tf->tf_name); - BUG(); + list_for_each_entry(t, &g_tf_list, tf_list) { + if (!strcmp(t->tf_ops->name, fo->name)) { + BUG_ON(atomic_read(&t->tf_access_cnt)); + list_del(&t->tf_list); + mutex_unlock(&g_tf_lock); + /* + * Wait for any outstanding fabric se_deve_entry->rcu_head + * callbacks to complete post kfree_rcu(), before allowing + * fabric driver unload of TFO->module to proceed. 
+ */ + rcu_barrier(); + kfree(t); + return; + } } - list_del(&tf->tf_list); mutex_unlock(&g_tf_lock); - - pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" - " %s\n", tf->tf_name); - tf->tf_module = NULL; - tf->tf_subsys = NULL; - kfree(tf); - - pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" - ">>>>>\n"); } -EXPORT_SYMBOL(target_fabric_configfs_deregister); +EXPORT_SYMBOL(target_unregister_template); /*############################################################################## // Stop functions called by external Target Fabrics Modules //############################################################################*/ -/* Start functions for struct config_item_type target_core_dev_attrib_cit */ +static inline struct se_dev_attrib *to_attrib(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_dev_attrib, + da_group); +} -#define DEF_DEV_ATTRIB_SHOW(_name) \ -static ssize_t target_core_dev_show_attr_##_name( \ - struct se_dev_attrib *da, \ - char *page) \ +/* Start functions for struct config_item_type tb_dev_attrib_cit */ +#define DEF_CONFIGFS_ATTRIB_SHOW(_name) \ +static ssize_t _name##_show(struct config_item *item, char *page) \ { \ - return snprintf(page, PAGE_SIZE, "%u\n", \ - (u32)da->da_dev->dev_attrib._name); \ + return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \ } -#define DEF_DEV_ATTRIB_STORE(_name) \ -static ssize_t target_core_dev_store_attr_##_name( \ - struct se_dev_attrib *da, \ - const char *page, \ - size_t count) \ +DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc); +DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type); +DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type); +DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format); +DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids); +DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord); +DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl); +DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size); +DEF_CONFIGFS_ATTRIB_SHOW(block_size); +DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors); +DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors); +DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth); +DEF_CONFIGFS_ATTRIB_SHOW(queue_depth); +DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count); +DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count); +DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity); +DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment); +DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); + +#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ +static ssize_t _name##_store(struct config_item *item, const char *page,\ + size_t count) \ { \ - unsigned long val; \ + struct se_dev_attrib *da = to_attrib(item); \ + u32 val; \ int ret; \ \ - ret = strict_strtoul(page, 0, &val); \ - if (ret < 0) { \ - pr_err("strict_strtoul() failed with" \ - " ret: %d\n", ret); \ - return -EINVAL; \ - } \ - ret = se_dev_set_##_name(da->da_dev, (u32)val); \ + ret = kstrtou32(page, 0, &val); \ + if (ret < 0) \ + return ret; \ + da->_name = val; \ + return count; \ +} + +DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count); +DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count); +DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity); 
+DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment); +DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len); + +#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \ +static ssize_t _name##_store(struct config_item *item, const char *page, \ + size_t count) \ +{ \ + struct se_dev_attrib *da = to_attrib(item); \ + bool flag; \ + int ret; \ \ - return (!ret) ? count : -EINVAL; \ + ret = strtobool(page, &flag); \ + if (ret < 0) \ + return ret; \ + da->_name = flag; \ + return count; \ +} + +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot); + +#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \ +static ssize_t _name##_store(struct config_item *item, const char *page,\ + size_t count) \ +{ \ + printk_once(KERN_WARNING \ + "ignoring deprecated %s attribute\n", __stringify(_name)); \ + return count; \ } -#define DEF_DEV_ATTRIB(_name) \ -DEF_DEV_ATTRIB_SHOW(_name); \ -DEF_DEV_ATTRIB_STORE(_name); +DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo); +DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read); -#define DEF_DEV_ATTRIB_RO(_name) \ -DEF_DEV_ATTRIB_SHOW(_name); +static void dev_set_t10_wwn_model_alias(struct se_device *dev) +{ + const char *configname; -CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); -#define SE_DEV_ATTR(_name, _mode) \ -static struct target_core_dev_attrib_attribute \ - target_core_dev_attrib_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_dev_show_attr_##_name, \ - target_core_dev_store_attr_##_name); + configname = config_item_name(&dev->dev_group.cg_item); + if (strlen(configname) >= 16) { + pr_warn("dev[%p]: Backstore name '%s' is too long for " + "INQUIRY_MODEL, truncating to 16 bytes\n", dev, + configname); + } + snprintf(&dev->t10_wwn.model[0], 16, "%s", configname); +} -#define SE_DEV_ATTR_RO(_name); \ -static struct target_core_dev_attrib_attribute \ - target_core_dev_attrib_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_dev_show_attr_##_name); +static ssize_t emulate_model_alias_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag; + int ret; -DEF_DEV_ATTRIB(emulate_model_alias); -SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR); + if (dev->export_count) { + pr_err("dev[%p]: Unable to change model alias" + " while export_count is %d\n", + dev, dev->export_count); + return -EINVAL; + } -DEF_DEV_ATTRIB(emulate_dpo); -SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); + ret = strtobool(page, &flag); + if (ret < 0) + return ret; -DEF_DEV_ATTRIB(emulate_fua_write); -SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR); + if (flag) { + dev_set_t10_wwn_model_alias(dev); + } else { + strncpy(&dev->t10_wwn.model[0], + dev->transport->inquiry_prod, 16); + } + da->emulate_model_alias = flag; + return count; +} + +static ssize_t emulate_write_cache_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = strtobool(page, &flag); + if (ret < 0) + return ret; + + if (flag && da->da_dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; + } + + da->emulate_write_cache = flag; + pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", + da->da_dev, flag); + return count; +} +
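/*
 * For reference, the macro-generated show/store handlers above are wired
 * into configfs by the CONFIGFS_ATTR() invocations further down.  Roughly,
 * following the CONFIGFS_ATTR()/CONFIGFS_ATTR_RO() helpers in the kernel's
 * include/linux/configfs.h, a pair such as DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas)
 * plus CONFIGFS_ATTR(, emulate_tas) expands to:
 *
 *	static ssize_t emulate_tas_show(struct config_item *item, char *page)
 *	{
 *		return snprintf(page, PAGE_SIZE, "%u\n",
 *				to_attrib(item)->emulate_tas);
 *	}
 *
 *	static struct configfs_attribute attr_emulate_tas = {
 *		.ca_name	= "emulate_tas",
 *		.ca_mode	= S_IRUGO | S_IWUSR,
 *		.ca_owner	= THIS_MODULE,
 *		.show		= emulate_tas_show,
 *		.store		= emulate_tas_store,
 *	};
 *
 * which is the &attr_emulate_tas entry that sbc_attrib_attrs[] below points
 * at; attributes with side effects (like emulate_tas) keep a hand-written
 * _store next to the macro-generated _show.
 */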
+static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + u32 val; + int ret; -DEF_DEV_ATTRIB(emulate_fua_read); -SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR); + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (val != 0 && val != 1 && val != 2) { + pr_err("Illegal value %d\n", val); + return -EINVAL; + } -DEF_DEV_ATTRIB(emulate_write_cache); -SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR); + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device" + " UA_INTRLCK_CTRL while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + da->emulate_ua_intlck_ctrl = val; + pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", + da->da_dev, val); + return count; +} -DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl); -SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); +static ssize_t emulate_tas_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; -DEF_DEV_ATTRIB(emulate_tas); -SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR); + ret = strtobool(page, &flag); + if (ret < 0) + return ret; -DEF_DEV_ATTRIB(emulate_tpu); -SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device TAS while" + " export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + da->emulate_tas = flag; + pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", + da->da_dev, flag ? "Enabled" : "Disabled"); -DEF_DEV_ATTRIB(emulate_tpws); -SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); + return count; +} -DEF_DEV_ATTRIB(enforce_pr_isids); -SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); +static ssize_t emulate_tpu_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; -DEF_DEV_ATTRIB(is_nonrot); -SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); + ret = strtobool(page, &flag); + if (ret < 0) + return ret; -DEF_DEV_ATTRIB(emulate_rest_reord); -SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); + /* + * We expect this value to be non-zero when generic Block Layer + * Discard support is detected in iblock_create_virtdevice(). + */ + if (flag && !da->max_unmap_block_desc_count) { + pr_err("Generic Block Discard not supported\n"); + return -ENOSYS; + } -DEF_DEV_ATTRIB_RO(hw_block_size); -SE_DEV_ATTR_RO(hw_block_size); + da->emulate_tpu = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", + da->da_dev, flag); + return count; +} -DEF_DEV_ATTRIB(block_size); -SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); +static ssize_t emulate_tpws_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; -DEF_DEV_ATTRIB_RO(hw_max_sectors); -SE_DEV_ATTR_RO(hw_max_sectors); + ret = strtobool(page, &flag); + if (ret < 0) + return ret; -DEF_DEV_ATTRIB(fabric_max_sectors); -SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); + /* + * We expect this value to be non-zero when generic Block Layer + * Discard support is detected in iblock_create_virtdevice().
+ */ + if (flag && !da->max_unmap_block_desc_count) { + pr_err("Generic Block Discard not supported\n"); + return -ENOSYS; + } -DEF_DEV_ATTRIB(optimal_sectors); -SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); + da->emulate_tpws = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", + da->da_dev, flag); + return count; +} -DEF_DEV_ATTRIB_RO(hw_queue_depth); -SE_DEV_ATTR_RO(hw_queue_depth); +static ssize_t pi_prot_type_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + int old_prot = da->pi_prot_type, ret; + struct se_device *dev = da->da_dev; + u32 flag; -DEF_DEV_ATTRIB(queue_depth); -SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); + ret = kstrtou32(page, 0, &flag); + if (ret < 0) + return ret; -DEF_DEV_ATTRIB(max_unmap_lba_count); -SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); + if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { + pr_err("Illegal value %d for pi_prot_type\n", flag); + return -EINVAL; + } + if (flag == 2) { + pr_err("DIF TYPE2 protection currently not supported\n"); + return -ENOSYS; + } + if (da->hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return count; + } + if (!dev->transport->init_prot || !dev->transport->free_prot) { + /* 0 is only allowed value for non-supporting backends */ + if (flag == 0) + return count; -DEF_DEV_ATTRIB(max_unmap_block_desc_count); -SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR); + pr_err("DIF protection not supported by backend: %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } -DEF_DEV_ATTRIB(unmap_granularity); -SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR); + da->pi_prot_type = flag; -DEF_DEV_ATTRIB(unmap_granularity_alignment); -SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR); + if (flag && !old_prot) { + ret = dev->transport->init_prot(dev); + if (ret) { + da->pi_prot_type = old_prot; + return ret; + } -DEF_DEV_ATTRIB(max_write_same_len); -SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR); + } else if (!flag && old_prot) { + dev->transport->free_prot(dev); + } -CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); + pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); + return count; +} -static struct configfs_attribute *target_core_dev_attrib_attrs[] = { - &target_core_dev_attrib_emulate_model_alias.attr, - &target_core_dev_attrib_emulate_dpo.attr, - &target_core_dev_attrib_emulate_fua_write.attr, - &target_core_dev_attrib_emulate_fua_read.attr, - &target_core_dev_attrib_emulate_write_cache.attr, - &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, - &target_core_dev_attrib_emulate_tas.attr, - &target_core_dev_attrib_emulate_tpu.attr, - &target_core_dev_attrib_emulate_tpws.attr, - &target_core_dev_attrib_enforce_pr_isids.attr, - &target_core_dev_attrib_is_nonrot.attr, - &target_core_dev_attrib_emulate_rest_reord.attr, - &target_core_dev_attrib_hw_block_size.attr, - &target_core_dev_attrib_block_size.attr, - &target_core_dev_attrib_hw_max_sectors.attr, - &target_core_dev_attrib_fabric_max_sectors.attr, - &target_core_dev_attrib_optimal_sectors.attr, - &target_core_dev_attrib_hw_queue_depth.attr, - &target_core_dev_attrib_queue_depth.attr, - 
&target_core_dev_attrib_max_unmap_lba_count.attr, - &target_core_dev_attrib_max_unmap_block_desc_count.attr, - &target_core_dev_attrib_unmap_granularity.attr, - &target_core_dev_attrib_unmap_granularity_alignment.attr, - &target_core_dev_attrib_max_write_same_len.attr, +static ssize_t pi_prot_format_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag; + int ret; + + ret = strtobool(page, &flag); + if (ret < 0) + return ret; + + if (!flag) + return count; + + if (!dev->transport->format_prot) { + pr_err("DIF protection format not supported by backend %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("DIF protection format requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to format SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + ret = dev->transport->format_prot(dev); + if (ret) + return ret; + + pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); + return count; +} + +static ssize_t force_pr_aptpl_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = strtobool(page, &flag); + if (ret < 0) + return ret; + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to set force_pr_aptpl while" + " export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + + da->force_pr_aptpl = flag; + pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag); + return count; +} + +static ssize_t emulate_rest_reord_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = strtobool(page, &flag); + if (ret < 0) + return ret; + + if (flag != 0) { + printk(KERN_ERR "dev[%p]: SE Device emulation of restricted" + " reordering not implemented\n", da->da_dev); + return -ENOSYS; + } + da->emulate_rest_reord = flag; + pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", + da->da_dev, flag); + return count; +} + +/* + * Note, this can only be called on unexported SE Device Object. 
+ */ +static ssize_t queue_depth_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device TCQ while" + " export_count is %d\n", + dev, dev->export_count); + return -EINVAL; + } + if (!val) { + pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev); + return -EINVAL; + } + + if (val > dev->dev_attrib.queue_depth) { + if (val > dev->dev_attrib.hw_queue_depth) { + pr_err("dev[%p]: Passed queue_depth:" + " %u exceeds TCM/SE_Device MAX" + " TCQ: %u\n", dev, val, + dev->dev_attrib.hw_queue_depth); + return -EINVAL; + } + } + da->queue_depth = dev->queue_depth = val; + pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val); + return count; +} + +static ssize_t optimal_sectors_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device" + " optimal_sectors while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + if (val > da->hw_max_sectors) { + pr_err("dev[%p]: Passed optimal_sectors %u cannot be" + " greater than hw_max_sectors: %u\n", + da->da_dev, val, da->hw_max_sectors); + return -EINVAL; + } + + da->optimal_sectors = val; + pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", + da->da_dev, val); + return count; +} + +static ssize_t block_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device block_size" + " while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + + if (val != 512 && val != 1024 && val != 2048 && val != 4096) { + pr_err("dev[%p]: Illegal value for block_device: %u" + " for SE device, must be 512, 1024, 2048 or 4096\n", + da->da_dev, val); + return -EINVAL; + } + + da->block_size = val; + if (da->max_bytes_per_io) + da->hw_max_sectors = da->max_bytes_per_io / val; + + pr_debug("dev[%p]: SE Device block_size changed to %u\n", + da->da_dev, val); + return count; +} + +CONFIGFS_ATTR(, emulate_model_alias); +CONFIGFS_ATTR(, emulate_dpo); +CONFIGFS_ATTR(, emulate_fua_write); +CONFIGFS_ATTR(, emulate_fua_read); +CONFIGFS_ATTR(, emulate_write_cache); +CONFIGFS_ATTR(, emulate_ua_intlck_ctrl); +CONFIGFS_ATTR(, emulate_tas); +CONFIGFS_ATTR(, emulate_tpu); +CONFIGFS_ATTR(, emulate_tpws); +CONFIGFS_ATTR(, emulate_caw); +CONFIGFS_ATTR(, emulate_3pc); +CONFIGFS_ATTR(, pi_prot_type); +CONFIGFS_ATTR_RO(, hw_pi_prot_type); +CONFIGFS_ATTR(, pi_prot_format); +CONFIGFS_ATTR(, enforce_pr_isids); +CONFIGFS_ATTR(, is_nonrot); +CONFIGFS_ATTR(, emulate_rest_reord); +CONFIGFS_ATTR(, force_pr_aptpl); +CONFIGFS_ATTR_RO(, hw_block_size); +CONFIGFS_ATTR(, block_size); +CONFIGFS_ATTR_RO(, hw_max_sectors); +CONFIGFS_ATTR(, optimal_sectors); +CONFIGFS_ATTR_RO(, hw_queue_depth); +CONFIGFS_ATTR(, queue_depth); +CONFIGFS_ATTR(, max_unmap_lba_count); +CONFIGFS_ATTR(, max_unmap_block_desc_count); +CONFIGFS_ATTR(, unmap_granularity); +CONFIGFS_ATTR(, unmap_granularity_alignment); +CONFIGFS_ATTR(, 
max_write_same_len); + +/* + * dev_attrib attributes for devices using the target core SBC/SPC + * interpreter. Any backend using spc_parse_cdb should be using + * these. + */ +struct configfs_attribute *sbc_attrib_attrs[] = { + &attr_emulate_model_alias, + &attr_emulate_dpo, + &attr_emulate_fua_write, + &attr_emulate_fua_read, + &attr_emulate_write_cache, + &attr_emulate_ua_intlck_ctrl, + &attr_emulate_tas, + &attr_emulate_tpu, + &attr_emulate_tpws, + &attr_emulate_caw, + &attr_emulate_3pc, + &attr_pi_prot_type, + &attr_hw_pi_prot_type, + &attr_pi_prot_format, + &attr_enforce_pr_isids, + &attr_is_nonrot, + &attr_emulate_rest_reord, + &attr_force_pr_aptpl, + &attr_hw_block_size, + &attr_block_size, + &attr_hw_max_sectors, + &attr_optimal_sectors, + &attr_hw_queue_depth, + &attr_queue_depth, + &attr_max_unmap_lba_count, + &attr_max_unmap_block_desc_count, + &attr_unmap_granularity, + &attr_unmap_granularity_alignment, + &attr_max_write_same_len, NULL, }; +EXPORT_SYMBOL(sbc_attrib_attrs); -static struct configfs_item_operations target_core_dev_attrib_ops = { - .show_attribute = target_core_dev_attrib_attr_show, - .store_attribute = target_core_dev_attrib_attr_store, +/* + * Minimal dev_attrib attributes for devices passing through CDBs. + * In this case we only provide a few read-only attributes for + * backwards compatibility. + */ +struct configfs_attribute *passthrough_attrib_attrs[] = { + &attr_hw_pi_prot_type, + &attr_hw_block_size, + &attr_hw_max_sectors, + &attr_hw_queue_depth, + NULL, }; +EXPORT_SYMBOL(passthrough_attrib_attrs); -static struct config_item_type target_core_dev_attrib_cit = { - .ct_item_ops = &target_core_dev_attrib_ops, - .ct_attrs = target_core_dev_attrib_attrs, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL); -/* End functions for struct config_item_type target_core_dev_attrib_cit */ +/* End functions for struct config_item_type tb_dev_attrib_cit */ -/* Start functions for struct config_item_type target_core_dev_wwn_cit */ +/* Start functions for struct config_item_type tb_dev_wwn_cit */ -CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); -#define SE_DEV_WWN_ATTR(_name, _mode) \ -static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_dev_wwn_show_attr_##_name, \ - target_core_dev_wwn_store_attr_##_name); - -#define SE_DEV_WWN_ATTR_RO(_name); \ -do { \ - static struct target_core_dev_wwn_attribute \ - target_core_dev_wwn_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_dev_wwn_show_attr_##_name); \ -} while (0); +static struct t10_wwn *to_t10_wwn(struct config_item *item) +{ + return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group); +} /* * VPD page 0x80 Unit serial */ -static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial( - struct t10_wwn *t10_wwn, - char *page) +static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item, + char *page) { return sprintf(page, "T10 VPD Unit Serial Number: %s\n", - &t10_wwn->unit_serial[0]); + &to_t10_wwn(item)->unit_serial[0]); } -static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( - struct t10_wwn *t10_wwn, - const char *page, - size_t count) +static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item, + const char *page, size_t count) { + struct t10_wwn *t10_wwn = to_t10_wwn(item); struct se_device *dev = t10_wwn->t10_dev; unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; @@ -812,15 +1133,13 @@ return count; } -SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR); - /* * VPD 
page 0x83 Protocol Identifier */ -static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( - struct t10_wwn *t10_wwn, - char *page) +static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item, + char *page) { + struct t10_wwn *t10_wwn = to_t10_wwn(item); struct t10_vpd *vpd; unsigned char buf[VPD_TMP_BUF_SIZE]; ssize_t len = 0; @@ -844,25 +1163,15 @@ return len; } -static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier( - struct t10_wwn *t10_wwn, - const char *page, - size_t count) -{ - return -ENOSYS; -} - -SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR); - /* * Generic wrapper for dumping VPD identifiers by association. */ #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ -static ssize_t target_core_dev_wwn_show_attr_##_name( \ - struct t10_wwn *t10_wwn, \ - char *page) \ +static ssize_t target_wwn_##_name##_show(struct config_item *item, \ + char *page) \ { \ - struct t10_vpd *vpd; \ + struct t10_wwn *t10_wwn = to_t10_wwn(item); \ + struct t10_vpd *vpd; \ unsigned char buf[VPD_TMP_BUF_SIZE]; \ ssize_t len = 0; \ \ @@ -894,88 +1203,39 @@ return len; \ } -/* - * VPD page 0x83 Association: Logical Unit - */ +/* VPD page 0x83 Association: Logical Unit */ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); - -static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit( - struct t10_wwn *t10_wwn, - const char *page, - size_t count) -{ - return -ENOSYS; -} - -SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR); - -/* - * VPD page 0x83 Association: Target Port - */ +/* VPD page 0x83 Association: Target Port */ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); - -static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port( - struct t10_wwn *t10_wwn, - const char *page, - size_t count) -{ - return -ENOSYS; -} - -SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR); - -/* - * VPD page 0x83 Association: SCSI Target Device - */ +/* VPD page 0x83 Association: SCSI Target Device */ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); -static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device( - struct t10_wwn *t10_wwn, - const char *page, - size_t count) -{ - return -ENOSYS; -} - -SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR); - -CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group); +CONFIGFS_ATTR(target_wwn_, vpd_unit_serial); +CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier); +CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit); +CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port); +CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device); static struct configfs_attribute *target_core_dev_wwn_attrs[] = { - &target_core_dev_wwn_vpd_unit_serial.attr, - &target_core_dev_wwn_vpd_protocol_identifier.attr, - &target_core_dev_wwn_vpd_assoc_logical_unit.attr, - &target_core_dev_wwn_vpd_assoc_target_port.attr, - &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr, + &target_wwn_attr_vpd_unit_serial, + &target_wwn_attr_vpd_protocol_identifier, + &target_wwn_attr_vpd_assoc_logical_unit, + &target_wwn_attr_vpd_assoc_target_port, + &target_wwn_attr_vpd_assoc_scsi_target_device, NULL, }; -static struct configfs_item_operations target_core_dev_wwn_ops = { - .show_attribute = target_core_dev_wwn_attr_show, - .store_attribute = target_core_dev_wwn_attr_store, -}; - -static struct config_item_type target_core_dev_wwn_cit = { - .ct_item_ops = &target_core_dev_wwn_ops, - .ct_attrs = target_core_dev_wwn_attrs, - .ct_owner = THIS_MODULE, -}; 
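/*
 * The TB_CIT_SETUP() call that follows replaces the open-coded
 * config_item_type removed above.  Per the helper macro added at the top
 * of this file, TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs)
 * expands to essentially:
 *
 *	static void target_core_setup_dev_wwn_cit(struct target_backend *tb)
 *	{
 *		struct config_item_type *cit = &tb->tb_dev_wwn_cit;
 *
 *		cit->ct_item_ops = NULL;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = target_core_dev_wwn_attrs;
 *		cit->ct_owner = tb->ops->owner;
 *		pr_debug("Setup generic %s\n", __stringify(dev_wwn));
 *	}
 *
 * so the item type now lives in the per-backend struct target_backend
 * rather than in a file-scope static, and ct_owner points at the backend
 * module instead of THIS_MODULE.
 */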
+TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs); -/* End functions for struct config_item_type target_core_dev_wwn_cit */ +/* End functions for struct config_item_type tb_dev_wwn_cit */ -/* Start functions for struct config_item_type target_core_dev_pr_cit */ +/* Start functions for struct config_item_type tb_dev_pr_cit */ -CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); -#define SE_DEV_PR_ATTR(_name, _mode) \ -static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_dev_pr_show_attr_##_name, \ - target_core_dev_pr_store_attr_##_name); - -#define SE_DEV_PR_ATTR_RO(_name); \ -static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_dev_pr_show_attr_##_name); +static struct se_device *pr_to_dev(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_device, + dev_pr_group); +} static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, char *page) @@ -983,7 +1243,6 @@ struct se_node_acl *se_nacl; struct t10_pr_registration *pr_reg; char i_buf[PR_REG_ISID_ID_LEN]; - int prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); @@ -992,12 +1251,11 @@ return sprintf(page, "No SPC-3 Reservation holder\n"); se_nacl = pr_reg->pr_reg_nacl; - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], - PR_REG_ISID_ID_LEN); + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), - se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); + se_nacl->initiatorname, i_buf); } static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev, @@ -1018,12 +1276,12 @@ return len; } -static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, - char *page) +static ssize_t target_pr_res_holder_show(struct config_item *item, char *page) { + struct se_device *dev = pr_to_dev(item); int ret; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return sprintf(page, "Passthrough\n"); spin_lock(&dev->dev_reservation_lock); @@ -1035,11 +1293,10 @@ return ret; } -SE_DEV_PR_ATTR_RO(res_holder); - -static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( - struct se_device *dev, char *page) +static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item, + char *page) { + struct se_device *dev = pr_to_dev(item); ssize_t len = 0; spin_lock(&dev->dev_reservation_lock); @@ -1057,27 +1314,21 @@ return len; } -SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); - -static ssize_t target_core_dev_pr_show_attr_res_pr_generation( - struct se_device *dev, char *page) +static ssize_t target_pr_res_pr_generation_show(struct config_item *item, + char *page) { - return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation); + return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation); } -SE_DEV_PR_ATTR_RO(res_pr_generation); -/* - * res_pr_holder_tg_port - */ -static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( - struct se_device *dev, char *page) +static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item, + char *page) { + struct se_device *dev = pr_to_dev(item); struct se_node_acl *se_nacl; - struct se_lun *lun; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg; - struct target_core_fabric_ops *tfo; + const struct target_core_fabric_ops *tfo; ssize_t len = 0; 
spin_lock(&dev->dev_reservation_lock); @@ -1089,7 +1340,6 @@ se_nacl = pr_reg->pr_reg_nacl; se_tpg = se_nacl->se_tpg; - lun = pr_reg->pr_reg_tg_pt_lun; tfo = se_tpg->se_tpg_tfo; len += sprintf(page+len, "SPC-3 Reservation: %s" @@ -1097,26 +1347,26 @@ tfo->tpg_get_wwn(se_tpg)); len += sprintf(page+len, "SPC-3 Reservation: Relative Port" " Identifier Tag: %hu %s Portal Group Tag: %hu" - " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, + " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi, tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), - tfo->get_fabric_name(), lun->unpacked_lun); + tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun); out_unlock: spin_unlock(&dev->dev_reservation_lock); return len; } -SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); -static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( - struct se_device *dev, char *page) +static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item, + char *page) { - struct target_core_fabric_ops *tfo; + struct se_device *dev = pr_to_dev(item); + const struct target_core_fabric_ops *tfo; struct t10_pr_registration *pr_reg; unsigned char buf[384]; char i_buf[PR_REG_ISID_ID_LEN]; ssize_t len = 0; - int reg_count = 0, prf_isid; + int reg_count = 0; len += sprintf(page+len, "SPC-3 PR Registrations:\n"); @@ -1127,12 +1377,11 @@ memset(buf, 0, 384); memset(i_buf, 0, PR_REG_ISID_ID_LEN); tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; - prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", tfo->get_fabric_name(), - pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? - &i_buf[0] : "", pr_reg->pr_res_key, + pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key, pr_reg->pr_res_generation); if (len + strlen(buf) >= PAGE_SIZE) @@ -1149,11 +1398,9 @@ return len; } -SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); - -static ssize_t target_core_dev_pr_show_attr_res_pr_type( - struct se_device *dev, char *page) +static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page) { + struct se_device *dev = pr_to_dev(item); struct t10_pr_registration *pr_reg; ssize_t len = 0; @@ -1170,12 +1417,11 @@ return len; } -SE_DEV_PR_ATTR_RO(res_pr_type); - -static ssize_t target_core_dev_pr_show_attr_res_type( - struct se_device *dev, char *page) +static ssize_t target_pr_res_type_show(struct config_item *item, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + struct se_device *dev = pr_to_dev(item); + + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return sprintf(page, "SPC_PASSTHROUGH\n"); else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return sprintf(page, "SPC2_RESERVATIONS\n"); @@ -1183,27 +1429,24 @@ return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); } -SE_DEV_PR_ATTR_RO(res_type); - -static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( - struct se_device *dev, char *page) +static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, + char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + struct se_device *dev = pr_to_dev(item); + + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; return sprintf(page, "APTPL Bit Status: %s\n", (dev->t10_pr.pr_aptpl_active) ? 
"Activated" : "Disabled"); } -SE_DEV_PR_ATTR_RO(res_aptpl_active); - -/* - * res_aptpl_metadata - */ -static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( - struct se_device *dev, char *page) +static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item, + char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + struct se_device *dev = pr_to_dev(item); + + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); @@ -1225,35 +1468,34 @@ {Opt_res_type, "res_type=%d"}, {Opt_res_scope, "res_scope=%d"}, {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, - {Opt_mapped_lun, "mapped_lun=%d"}, + {Opt_mapped_lun, "mapped_lun=%lld"}, {Opt_target_fabric, "target_fabric=%s"}, {Opt_target_node, "target_node=%s"}, {Opt_tpgt, "tpgt=%d"}, {Opt_port_rtpi, "port_rtpi=%d"}, - {Opt_target_lun, "target_lun=%d"}, + {Opt_target_lun, "target_lun=%lld"}, {Opt_err, NULL} }; -static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( - struct se_device *dev, - const char *page, - size_t count) +static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item, + const char *page, size_t count) { + struct se_device *dev = pr_to_dev(item); unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; unsigned char *t_fabric = NULL, *t_port = NULL; - char *orig, *ptr, *arg_p, *opts; + char *orig, *ptr, *opts; substring_t args[MAX_OPT_ARGS]; unsigned long long tmp_ll; u64 sa_res_key = 0; - u32 mapped_lun = 0, target_lun = 0; + u64 mapped_lun = 0, target_lun = 0; int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; - u16 port_rpti = 0, tpgt = 0; - u8 type = 0, scope; + u16 tpgt = 0; + u8 type = 0; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) - return 0; + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + return count; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) - return 0; + return count; if (dev->export_count) { pr_debug("Unable to process APTPL metadata while" @@ -1273,14 +1515,14 @@ token = match_token(ptr, tokens, args); switch (token) { case Opt_initiator_fabric: - i_fabric = match_strdup(&args[0]); + i_fabric = match_strdup(args); if (!i_fabric) { ret = -ENOMEM; goto out; } break; case Opt_initiator_node: - i_port = match_strdup(&args[0]); + i_port = match_strdup(args); if (!i_port) { ret = -ENOMEM; goto out; @@ -1294,7 +1536,7 @@ } break; case Opt_initiator_sid: - isid = match_strdup(&args[0]); + isid = match_strdup(args); if (!isid) { ret = -ENOMEM; goto out; @@ -1308,15 +1550,9 @@ } break; case Opt_sa_res_key: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - goto out; - } - ret = strict_strtoull(arg_p, 0, &tmp_ll); + ret = kstrtoull(args->from, 0, &tmp_ll); if (ret < 0) { - pr_err("strict_strtoull() failed for" - " sa_res_key=\n"); + pr_err("kstrtoull() failed for sa_res_key=\n"); goto out; } sa_res_key = (u64)tmp_ll; @@ -1325,37 +1561,46 @@ * PR APTPL Metadata for Reservation */ case Opt_res_holder: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; res_holder = arg; break; case Opt_res_type: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; type = (u8)arg; break; case Opt_res_scope: - match_int(args, &arg); - scope = (u8)arg; + ret = match_int(args, &arg); + if (ret) + goto out; break; case Opt_res_all_tg_pt: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; all_tg_pt = (int)arg; break; case Opt_mapped_lun: - 
match_int(args, &arg); - mapped_lun = (u32)arg; + ret = match_int(args, &arg); + if (ret) + goto out; + mapped_lun = (u64)arg; break; /* * PR APTPL Metadata for Target Port */ case Opt_target_fabric: - t_fabric = match_strdup(&args[0]); + t_fabric = match_strdup(args); if (!t_fabric) { ret = -ENOMEM; goto out; } break; case Opt_target_node: - t_port = match_strdup(&args[0]); + t_port = match_strdup(args); if (!t_port) { ret = -ENOMEM; goto out; @@ -1369,16 +1614,21 @@ } break; case Opt_tpgt: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; tpgt = (u16)arg; break; case Opt_port_rtpi: - match_int(args, &arg); - port_rpti = (u16)arg; + ret = match_int(args, &arg); + if (ret) + goto out; break; case Opt_target_lun: - match_int(args, &arg); - target_lun = (u32)arg; + ret = match_int(args, &arg); + if (ret) + goto out; + target_lun = (u64)arg; break; default: break; @@ -1411,81 +1661,65 @@ return (ret == 0) ? count : ret; } -SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); -CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group); +CONFIGFS_ATTR_RO(target_pr_, res_holder); +CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts); +CONFIGFS_ATTR_RO(target_pr_, res_pr_generation); +CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port); +CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts); +CONFIGFS_ATTR_RO(target_pr_, res_pr_type); +CONFIGFS_ATTR_RO(target_pr_, res_type); +CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active); +CONFIGFS_ATTR(target_pr_, res_aptpl_metadata); static struct configfs_attribute *target_core_dev_pr_attrs[] = { - &target_core_dev_pr_res_holder.attr, - &target_core_dev_pr_res_pr_all_tgt_pts.attr, - &target_core_dev_pr_res_pr_generation.attr, - &target_core_dev_pr_res_pr_holder_tg_port.attr, - &target_core_dev_pr_res_pr_registered_i_pts.attr, - &target_core_dev_pr_res_pr_type.attr, - &target_core_dev_pr_res_type.attr, - &target_core_dev_pr_res_aptpl_active.attr, - &target_core_dev_pr_res_aptpl_metadata.attr, + &target_pr_attr_res_holder, + &target_pr_attr_res_pr_all_tgt_pts, + &target_pr_attr_res_pr_generation, + &target_pr_attr_res_pr_holder_tg_port, + &target_pr_attr_res_pr_registered_i_pts, + &target_pr_attr_res_pr_type, + &target_pr_attr_res_type, + &target_pr_attr_res_aptpl_active, + &target_pr_attr_res_aptpl_metadata, NULL, }; -static struct configfs_item_operations target_core_dev_pr_ops = { - .show_attribute = target_core_dev_pr_attr_show, - .store_attribute = target_core_dev_pr_attr_store, -}; +TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs); -static struct config_item_type target_core_dev_pr_cit = { - .ct_item_ops = &target_core_dev_pr_ops, - .ct_attrs = target_core_dev_pr_attrs, - .ct_owner = THIS_MODULE, -}; +/* End functions for struct config_item_type tb_dev_pr_cit */ -/* End functions for struct config_item_type target_core_dev_pr_cit */ +/* Start functions for struct config_item_type tb_dev_cit */ -/* Start functions for struct config_item_type target_core_dev_cit */ +static inline struct se_device *to_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_device, dev_group); +} -static ssize_t target_core_show_dev_info(void *p, char *page) +static ssize_t target_dev_info_show(struct config_item *item, char *page) { - struct se_device *dev = p; - struct se_subsystem_api *t = dev->transport; + struct se_device *dev = to_device(item); int bl = 0; ssize_t read_bytes = 0; transport_dump_dev_state(dev, page, &bl); read_bytes += bl; - read_bytes += t->show_configfs_dev_params(dev, 
page+read_bytes); + read_bytes += dev->transport->show_configfs_dev_params(dev, + page+read_bytes); return read_bytes; } -static struct target_core_configfs_attribute target_core_attr_dev_info = { - .attr = { .ca_owner = THIS_MODULE, - .ca_name = "info", - .ca_mode = S_IRUGO }, - .show = target_core_show_dev_info, - .store = NULL, -}; - -static ssize_t target_core_store_dev_control( - void *p, - const char *page, - size_t count) +static ssize_t target_dev_control_store(struct config_item *item, + const char *page, size_t count) { - struct se_device *dev = p; - struct se_subsystem_api *t = dev->transport; + struct se_device *dev = to_device(item); - return t->set_configfs_dev_params(dev, page, count); + return dev->transport->set_configfs_dev_params(dev, page, count); } -static struct target_core_configfs_attribute target_core_attr_dev_control = { - .attr = { .ca_owner = THIS_MODULE, - .ca_name = "control", - .ca_mode = S_IWUSR }, - .show = NULL, - .store = target_core_store_dev_control, -}; - -static ssize_t target_core_show_dev_alias(void *p, char *page) +static ssize_t target_dev_alias_show(struct config_item *item, char *page) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); if (!(dev->dev_flags & DF_USING_ALIAS)) return 0; @@ -1493,12 +1727,10 @@ return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); } -static ssize_t target_core_store_dev_alias( - void *p, - const char *page, - size_t count) +static ssize_t target_dev_alias_store(struct config_item *item, + const char *page, size_t count) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); struct se_hba *hba = dev->se_hba; ssize_t read_bytes; @@ -1525,17 +1757,9 @@ return read_bytes; } -static struct target_core_configfs_attribute target_core_attr_dev_alias = { - .attr = { .ca_owner = THIS_MODULE, - .ca_name = "alias", - .ca_mode = S_IRUGO | S_IWUSR }, - .show = target_core_show_dev_alias, - .store = target_core_store_dev_alias, -}; - -static ssize_t target_core_show_dev_udev_path(void *p, char *page) +static ssize_t target_dev_udev_path_show(struct config_item *item, char *page) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); if (!(dev->dev_flags & DF_USING_UDEV_PATH)) return 0; @@ -1543,12 +1767,10 @@ return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); } -static ssize_t target_core_store_dev_udev_path( - void *p, - const char *page, - size_t count) +static ssize_t target_dev_udev_path_store(struct config_item *item, + const char *page, size_t count) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); struct se_hba *hba = dev->se_hba; ssize_t read_bytes; @@ -1576,27 +1798,17 @@ return read_bytes; } -static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { - .attr = { .ca_owner = THIS_MODULE, - .ca_name = "udev_path", - .ca_mode = S_IRUGO | S_IWUSR }, - .show = target_core_show_dev_udev_path, - .store = target_core_store_dev_udev_path, -}; - -static ssize_t target_core_show_dev_enable(void *p, char *page) +static ssize_t target_dev_enable_show(struct config_item *item, char *page) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); } -static ssize_t target_core_store_dev_enable( - void *p, - const char *page, - size_t count) +static ssize_t target_dev_enable_store(struct config_item *item, + const char *page, size_t count) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); char *ptr; int 
ret; @@ -1613,17 +1825,9 @@ return count; } -static struct target_core_configfs_attribute target_core_attr_dev_enable = { - .attr = { .ca_owner = THIS_MODULE, - .ca_name = "enable", - .ca_mode = S_IRUGO | S_IWUSR }, - .show = target_core_show_dev_enable, - .store = target_core_store_dev_enable, -}; - -static ssize_t target_core_show_alua_lu_gp(void *p, char *page) +static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); struct config_item *lu_ci; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem; @@ -1645,12 +1849,10 @@ return len; } -static ssize_t target_core_store_alua_lu_gp( - void *p, - const char *page, - size_t count) +static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, + const char *page, size_t count) { - struct se_device *dev = p; + struct se_device *dev = to_device(item); struct se_hba *hba = dev->se_hba; struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; struct t10_alua_lu_gp_member *lu_gp_mem; @@ -1659,7 +1861,7 @@ lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) - return 0; + return count; if (count > LU_GROUP_NAME_BUF) { pr_err("ALUA LU Group Alias too large!\n"); @@ -1727,123 +1929,233 @@ return count; } -static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { - .attr = { .ca_owner = THIS_MODULE, - .ca_name = "alua_lu_gp", - .ca_mode = S_IRUGO | S_IWUSR }, - .show = target_core_show_alua_lu_gp, - .store = target_core_store_alua_lu_gp, -}; - -static struct configfs_attribute *lio_core_dev_attrs[] = { - &target_core_attr_dev_info.attr, - &target_core_attr_dev_control.attr, - &target_core_attr_dev_alias.attr, - &target_core_attr_dev_udev_path.attr, - &target_core_attr_dev_enable.attr, - &target_core_attr_dev_alua_lu_gp.attr, - NULL, -}; - -static void target_core_dev_release(struct config_item *item) +static ssize_t target_dev_lba_map_show(struct config_item *item, char *page) { - struct config_group *dev_cg = to_config_group(item); - struct se_device *dev = - container_of(dev_cg, struct se_device, dev_group); + struct se_device *dev = to_device(item); + struct t10_alua_lba_map *map; + struct t10_alua_lba_map_member *mem; + char *b = page; + int bl = 0; + char state; - kfree(dev_cg->default_groups); - target_free_device(dev); + spin_lock(&dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + bl += sprintf(b + bl, "%u %u\n", + dev->t10_alua.lba_map_segment_size, + dev->t10_alua.lba_map_segment_multiplier); + list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { + bl += sprintf(b + bl, "%llu %llu", + map->lba_map_first_lba, map->lba_map_last_lba); + list_for_each_entry(mem, &map->lba_map_mem_list, + lba_map_mem_list) { + switch (mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + state = 'O'; + break; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + state = 'A'; + break; + case ALUA_ACCESS_STATE_STANDBY: + state = 'S'; + break; + case ALUA_ACCESS_STATE_UNAVAILABLE: + state = 'U'; + break; + default: + state = '.'; + break; + } + bl += sprintf(b + bl, " %d:%c", + mem->lba_map_mem_alua_pg_id, state); + } + bl += sprintf(b + bl, "\n"); + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return bl; } -static ssize_t target_core_dev_show(struct config_item *item, - struct configfs_attribute *attr, - char *page) +static ssize_t target_dev_lba_map_store(struct config_item *item, + const char *page, size_t count) { - struct config_group *dev_cg = 
to_config_group(item); - struct se_device *dev = - container_of(dev_cg, struct se_device, dev_group); - struct target_core_configfs_attribute *tc_attr = container_of( - attr, struct target_core_configfs_attribute, attr); + struct se_device *dev = to_device(item); + struct t10_alua_lba_map *lba_map = NULL; + struct list_head lba_list; + char *map_entries, *ptr; + char state; + int pg_num = -1, pg; + int ret = 0, num = 0, pg_id, alua_state; + unsigned long start_lba = -1, end_lba = -1; + unsigned long segment_size = -1, segment_mult = -1; - if (!tc_attr->show) - return -EINVAL; + map_entries = kstrdup(page, GFP_KERNEL); + if (!map_entries) + return -ENOMEM; + + INIT_LIST_HEAD(&lba_list); + while ((ptr = strsep(&map_entries, "\n")) != NULL) { + if (!*ptr) + continue; + + if (num == 0) { + if (sscanf(ptr, "%lu %lu\n", + &segment_size, &segment_mult) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + num++; + continue; + } + if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing end lba\n", num); + ret = -EINVAL; + break; + } + ptr++; + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing state definitions\n", + num); + ret = -EINVAL; + break; + } + ptr++; + lba_map = core_alua_allocate_lba_map(&lba_list, + start_lba, end_lba); + if (IS_ERR(lba_map)) { + ret = PTR_ERR(lba_map); + break; + } + pg = 0; + while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { + switch (state) { + case 'O': + alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; + break; + case 'A': + alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; + break; + case 'S': + alua_state = ALUA_ACCESS_STATE_STANDBY; + break; + case 'U': + alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; + break; + default: + pr_err("Invalid ALUA state '%c'\n", state); + ret = -EINVAL; + goto out; + } - return tc_attr->show(dev, page); + ret = core_alua_allocate_lba_map_mem(lba_map, + pg_id, alua_state); + if (ret) { + pr_err("Invalid target descriptor %d:%c " + "at line %d\n", + pg_id, state, num); + break; + } + pg++; + ptr = strchr(ptr, ' '); + if (ptr) + ptr++; + else + break; + } + if (pg_num == -1) + pg_num = pg; + else if (pg != pg_num) { + pr_err("Only %d from %d port groups definitions " + "at line %d\n", pg, pg_num, num); + ret = -EINVAL; + break; + } + num++; + } +out: + if (ret) { + core_alua_free_lba_map(&lba_list); + count = ret; + } else + core_alua_set_lba_map(dev, &lba_list, + segment_size, segment_mult); + kfree(map_entries); + return count; } -static ssize_t target_core_dev_store(struct config_item *item, - struct configfs_attribute *attr, - const char *page, size_t count) +CONFIGFS_ATTR_RO(target_dev_, info); +CONFIGFS_ATTR_WO(target_dev_, control); +CONFIGFS_ATTR(target_dev_, alias); +CONFIGFS_ATTR(target_dev_, udev_path); +CONFIGFS_ATTR(target_dev_, enable); +CONFIGFS_ATTR(target_dev_, alua_lu_gp); +CONFIGFS_ATTR(target_dev_, lba_map); + +static struct configfs_attribute *target_core_dev_attrs[] = { + &target_dev_attr_info, + &target_dev_attr_control, + &target_dev_attr_alias, + &target_dev_attr_udev_path, + &target_dev_attr_enable, + &target_dev_attr_alua_lu_gp, + &target_dev_attr_lba_map, + NULL, +}; + +static void target_core_dev_release(struct config_item *item) { struct config_group *dev_cg = to_config_group(item); struct se_device *dev = container_of(dev_cg, struct se_device, dev_group); - struct target_core_configfs_attribute *tc_attr = container_of( 
- attr, struct target_core_configfs_attribute, attr); - - if (!tc_attr->store) - return -EINVAL; - return tc_attr->store(dev, page, count); + kfree(dev_cg->default_groups); + target_free_device(dev); } static struct configfs_item_operations target_core_dev_item_ops = { .release = target_core_dev_release, - .show_attribute = target_core_dev_show, - .store_attribute = target_core_dev_store, }; -static struct config_item_type target_core_dev_cit = { - .ct_item_ops = &target_core_dev_item_ops, - .ct_attrs = lio_core_dev_attrs, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); -/* End functions for struct config_item_type target_core_dev_cit */ +/* End functions for struct config_item_type tb_dev_cit */ /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ -CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp); -#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \ -static struct target_core_alua_lu_gp_attribute \ - target_core_alua_lu_gp_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_alua_lu_gp_show_attr_##_name, \ - target_core_alua_lu_gp_store_attr_##_name); - -#define SE_DEV_ALUA_LU_ATTR_RO(_name) \ -static struct target_core_alua_lu_gp_attribute \ - target_core_alua_lu_gp_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_alua_lu_gp_show_attr_##_name); +static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item) +{ + return container_of(to_config_group(item), struct t10_alua_lu_gp, + lu_gp_group); +} -/* - * lu_gp_id - */ -static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( - struct t10_alua_lu_gp *lu_gp, - char *page) +static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page) { + struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); + if (!lu_gp->lu_gp_valid_id) return 0; - return sprintf(page, "%hu\n", lu_gp->lu_gp_id); } -static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( - struct t10_alua_lu_gp *lu_gp, - const char *page, - size_t count) +static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item, + const char *page, size_t count) { + struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; unsigned long lu_gp_id; int ret; - ret = strict_strtoul(page, 0, &lu_gp_id); + ret = kstrtoul(page, 0, &lu_gp_id); if (ret < 0) { - pr_err("strict_strtoul() returned %d for" + pr_err("kstrtoul() returned %d for" " lu_gp_id\n", ret); - return -EINVAL; + return ret; } if (lu_gp_id > 0x0000ffff) { pr_err("ALUA lu_gp_id: %lu exceeds maximum:" @@ -1863,15 +2175,9 @@ return count; } -SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR); - -/* - * members - */ -static ssize_t target_core_alua_lu_gp_show_attr_members( - struct t10_alua_lu_gp *lu_gp, - char *page) +static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) { + struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); struct se_device *dev; struct se_hba *hba; struct t10_alua_lu_gp_member *lu_gp_mem; @@ -1903,13 +2209,12 @@ return len; } -SE_DEV_ALUA_LU_ATTR_RO(members); - -CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group); +CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); +CONFIGFS_ATTR_RO(target_lu_gp_, members); static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { - &target_core_alua_lu_gp_lu_gp_id.attr, - &target_core_alua_lu_gp_members.attr, + &target_lu_gp_attr_lu_gp_id, + &target_lu_gp_attr_members, NULL, }; @@ -1923,8 +2228,6 @@ static struct configfs_item_operations target_core_alua_lu_gp_ops = { .release = 
target_core_alua_lu_gp_release, - .show_attribute = target_core_alua_lu_gp_attr_show, - .store_attribute = target_core_alua_lu_gp_attr_store, }; static struct config_item_type target_core_alua_lu_gp_cit = { @@ -1995,42 +2298,29 @@ /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ -CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp); -#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \ -static struct target_core_alua_tg_pt_gp_attribute \ - target_core_alua_tg_pt_gp_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_alua_tg_pt_gp_show_attr_##_name, \ - target_core_alua_tg_pt_gp_store_attr_##_name); - -#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \ -static struct target_core_alua_tg_pt_gp_attribute \ - target_core_alua_tg_pt_gp_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_alua_tg_pt_gp_show_attr_##_name); +static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item) +{ + return container_of(to_config_group(item), struct t10_alua_tg_pt_gp, + tg_pt_gp_group); +} -/* - * alua_access_state - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, + char *page) { return sprintf(page, "%d\n", - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state)); + atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, + const char *page, size_t count) { + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; unsigned long tmp; int new_state, ret; if (!tg_pt_gp->tg_pt_gp_valid_id) { - pr_err("Unable to do implict ALUA on non valid" + pr_err("Unable to do implicit ALUA on non valid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } @@ -2040,17 +2330,24 @@ return -ENODEV; } - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract new ALUA access state from" " %s\n", page); - return -EINVAL; + return ret; } new_state = (int)tmp; - if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { - pr_err("Unable to process implict configfs ALUA" - " transition while TPGS_IMPLICT_ALUA is disabled\n"); + if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { + pr_err("Unable to process implicit configfs ALUA" + " transition while TPGS_IMPLICIT_ALUA is disabled\n"); + return -EINVAL; + } + if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && + new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { + /* LBA DEPENDENT is only allowed with implicit ALUA */ + pr_err("Unable to process implicit configfs ALUA transition" + " while explicit ALUA management is enabled\n"); return -EINVAL; } @@ -2059,24 +2356,18 @@ return (!ret) ? 
count : -EINVAL; } -SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR); - -/* - * alua_access_status - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item, + char *page) { + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); return sprintf(page, "%s\n", core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_alua_access_status_store( + struct config_item *item, const char *page, size_t count) { + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); unsigned long tmp; int new_status, ret; @@ -2087,17 +2378,17 @@ return -EINVAL; } - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract new ALUA access status" " from %s\n", page); - return -EINVAL; + return ret; } new_status = (int)tmp; if ((new_status != ALUA_STATUS_NONE) && - (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && - (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { pr_err("Illegal ALUA access status: 0x%02x\n", new_status); return -EINVAL; @@ -2107,50 +2398,84 @@ return count; } -SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR); - -/* - * alua_access_type - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item, + char *page) { - return core_alua_show_access_type(tg_pt_gp, page); + return core_alua_show_access_type(to_tg_pt_gp(item), page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item, + const char *page, size_t count) { - return core_alua_store_access_type(tg_pt_gp, page, count); + return core_alua_store_access_type(to_tg_pt_gp(item), page, count); } -SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); +#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \ +static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \ + struct config_item *item, char *p) \ +{ \ + struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ + return sprintf(p, "%d\n", \ + !!(t->tg_pt_gp_alua_supported_states & _bit)); \ +} \ + \ +static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \ + struct config_item *item, const char *p, size_t c) \ +{ \ + struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ + unsigned long tmp; \ + int ret; \ + \ + if (!t->tg_pt_gp_valid_id) { \ + pr_err("Unable to do set ##_name ALUA state on non" \ + " valid tg_pt_gp ID: %hu\n", \ + t->tg_pt_gp_valid_id); \ + return -EINVAL; \ + } \ + \ + ret = kstrtoul(p, 0, &tmp); \ + if (ret < 0) { \ + pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ + return -EINVAL; \ + } \ + if (tmp > 1) { \ + pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ + return -EINVAL; \ + } \ + if (tmp) \ + t->tg_pt_gp_alua_supported_states |= _bit; \ + else \ + t->tg_pt_gp_alua_supported_states &= ~_bit; \ + \ + return c; \ +} -/* - * alua_write_metadata - */ -static ssize_t 
target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP); +ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP); +ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP); +ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP); +ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP); +ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP); +ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP); + +static ssize_t target_tg_pt_gp_alua_write_metadata_show( + struct config_item *item, char *page) { - return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata); + return sprintf(page, "%d\n", + to_tg_pt_gp(item)->tg_pt_gp_write_metadata); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_alua_write_metadata_store( + struct config_item *item, const char *page, size_t count) { + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { pr_err("Unable to extract alua_write_metadata\n"); - return -EINVAL; + return ret; } if ((tmp != 0) && (tmp != 1)) { @@ -2163,119 +2488,80 @@ return count; } -SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR); - - - -/* - * nonop_delay_msecs - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item, + char *page) { - return core_alua_show_nonop_delay_msecs(tg_pt_gp, page); - + return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item, + const char *page, size_t count) { - return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count); + return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page, + count); } -SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR); - -/* - * trans_delay_msecs - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item, + char *page) { - return core_alua_show_trans_delay_msecs(tg_pt_gp, page); + return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item, + const char *page, size_t count) { - return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count); + return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page, + count); } -SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); - -/* - * implict_trans_secs - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_implicit_trans_secs_show( + struct config_item *item, char *page) { - return core_alua_show_implict_trans_secs(tg_pt_gp, page); + return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page); } -static ssize_t 
target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_implicit_trans_secs_store( + struct config_item *item, const char *page, size_t count) { - return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); + return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page, + count); } -SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); - -/* - * preferred - */ - -static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item, + char *page) { - return core_alua_show_preferred_bit(tg_pt_gp, page); + return core_alua_show_preferred_bit(to_tg_pt_gp(item), page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item, + const char *page, size_t count) { - return core_alua_store_preferred_bit(tg_pt_gp, page, count); + return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count); } -SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR); - -/* - * tg_pt_gp_id - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item, + char *page) { + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); + if (!tg_pt_gp->tg_pt_gp_valid_id) return 0; - return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( - struct t10_alua_tg_pt_gp *tg_pt_gp, - const char *page, - size_t count) +static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item, + const char *page, size_t count) { + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; unsigned long tg_pt_gp_id; int ret; - ret = strict_strtoul(page, 0, &tg_pt_gp_id); + ret = kstrtoul(page, 0, &tg_pt_gp_id); if (ret < 0) { - pr_err("strict_strtoul() returned %d for" + pr_err("kstrtoul() returned %d for" " tg_pt_gp_id\n", ret); - return -EINVAL; + return ret; } if (tg_pt_gp_id > 0x0000ffff) { pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" @@ -2295,30 +2581,20 @@ return count; } -SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR); - -/* - * members - */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_members( - struct t10_alua_tg_pt_gp *tg_pt_gp, - char *page) +static ssize_t target_tg_pt_gp_members_show(struct config_item *item, + char *page) { - struct se_port *port; - struct se_portal_group *tpg; + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); struct se_lun *lun; - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; ssize_t len = 0, cur_len; unsigned char buf[TG_PT_GROUP_NAME_BUF]; memset(buf, 0, TG_PT_GROUP_NAME_BUF); spin_lock(&tg_pt_gp->tg_pt_gp_lock); - list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, - tg_pt_gp_mem_list) { - port = tg_pt_gp_mem->tg_pt; - tpg = port->sep_tpg; - lun = port->sep_lun; + list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, + lun_tg_pt_gp_link) { + struct se_portal_group *tpg = lun->lun_tpg; cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), @@ -2340,22 +2616,42 @@ return len; } -SE_DEV_ALUA_TG_PT_ATTR_RO(members); - 
-CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp, - tg_pt_gp_group); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata); +CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs); +CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs); +CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs); +CONFIGFS_ATTR(target_tg_pt_gp_, preferred); +CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id); +CONFIGFS_ATTR_RO(target_tg_pt_gp_, members); static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { - &target_core_alua_tg_pt_gp_alua_access_state.attr, - &target_core_alua_tg_pt_gp_alua_access_status.attr, - &target_core_alua_tg_pt_gp_alua_access_type.attr, - &target_core_alua_tg_pt_gp_alua_write_metadata.attr, - &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, - &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, - &target_core_alua_tg_pt_gp_implict_trans_secs.attr, - &target_core_alua_tg_pt_gp_preferred.attr, - &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, - &target_core_alua_tg_pt_gp_members.attr, + &target_tg_pt_gp_attr_alua_access_state, + &target_tg_pt_gp_attr_alua_access_status, + &target_tg_pt_gp_attr_alua_access_type, + &target_tg_pt_gp_attr_alua_support_transitioning, + &target_tg_pt_gp_attr_alua_support_offline, + &target_tg_pt_gp_attr_alua_support_lba_dependent, + &target_tg_pt_gp_attr_alua_support_unavailable, + &target_tg_pt_gp_attr_alua_support_standby, + &target_tg_pt_gp_attr_alua_support_active_nonoptimized, + &target_tg_pt_gp_attr_alua_support_active_optimized, + &target_tg_pt_gp_attr_alua_write_metadata, + &target_tg_pt_gp_attr_nonop_delay_msecs, + &target_tg_pt_gp_attr_trans_delay_msecs, + &target_tg_pt_gp_attr_implicit_trans_secs, + &target_tg_pt_gp_attr_preferred, + &target_tg_pt_gp_attr_tg_pt_gp_id, + &target_tg_pt_gp_attr_members, NULL, }; @@ -2369,8 +2665,6 @@ static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { .release = target_core_alua_tg_pt_gp_release, - .show_attribute = target_core_alua_tg_pt_gp_attr_show, - .store_attribute = target_core_alua_tg_pt_gp_attr_store, }; static struct config_item_type target_core_alua_tg_pt_gp_cit = { @@ -2381,7 +2675,7 @@ /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ -/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ +/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */ static struct config_group *target_core_alua_create_tg_pt_gp( struct config_group *group, @@ -2432,12 +2726,9 @@ .drop_item = &target_core_alua_drop_tg_pt_gp, }; -static struct config_item_type target_core_alua_tg_pt_gps_cit = { - .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL); -/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ +/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */ /* Start functions for struct 
config_item_type target_core_alua_cit */ @@ -2455,7 +2746,7 @@ /* End functions for struct config_item_type target_core_alua_cit */ -/* Start functions for struct config_item_type target_core_stat_cit */ +/* Start functions for struct config_item_type tb_dev_stat_cit */ static struct config_group *target_core_stat_mkdir( struct config_group *group, @@ -2476,12 +2767,9 @@ .drop_item = &target_core_stat_rmdir, }; -static struct config_item_type target_core_stat_cit = { - .ct_group_ops = &target_core_stat_group_ops, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL); -/* End functions for struct config_item_type target_core_stat_cit */ +/* End functions for struct config_item_type tb_dev_stat_cit */ /* Start functions for struct config_item_type target_core_hba_cit */ @@ -2490,9 +2778,9 @@ const char *name) { struct t10_alua_tg_pt_gp *tg_pt_gp; - struct se_subsystem_api *t; struct config_item *hba_ci = &group->cg_item; struct se_hba *hba = item_to_hba(hba_ci); + struct target_backend *tb = hba->backend; struct se_device *dev; struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; struct config_group *dev_stat_grp = NULL; @@ -2501,10 +2789,6 @@ ret = mutex_lock_interruptible(&hba->hba_access_mutex); if (ret) return ERR_PTR(ret); - /* - * Locate the struct se_subsystem_api from parent's struct se_hba. - */ - t = hba->transport; dev = target_alloc_device(hba, name); if (!dev) @@ -2517,17 +2801,17 @@ if (!dev_cg->default_groups) goto out_free_device; - config_group_init_type_name(dev_cg, name, &target_core_dev_cit); + config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit); config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", - &target_core_dev_attrib_cit); + &tb->tb_dev_attrib_cit); config_group_init_type_name(&dev->dev_pr_group, "pr", - &target_core_dev_pr_cit); + &tb->tb_dev_pr_cit); config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", - &target_core_dev_wwn_cit); + &tb->tb_dev_wwn_cit); config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, - "alua", &target_core_alua_tg_pt_gps_cit); + "alua", &tb->tb_dev_alua_tg_pt_gps_cit); config_group_init_type_name(&dev->dev_stat_grps.stat_group, - "statistics", &target_core_stat_cit); + "statistics", &tb->tb_dev_stat_cit); dev_cg->default_groups[0] = &dev->dev_attrib.da_group; dev_cg->default_groups[1] = &dev->dev_pr_group; @@ -2638,34 +2922,24 @@ .drop_item = target_core_drop_subdev, }; -CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba); -#define SE_HBA_ATTR(_name, _mode) \ -static struct target_core_hba_attribute \ - target_core_hba_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_hba_show_attr_##_name, \ - target_core_hba_store_attr_##_name); - -#define SE_HBA_ATTR_RO(_name) \ -static struct target_core_hba_attribute \ - target_core_hba_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_hba_show_attr_##_name); - -static ssize_t target_core_hba_show_attr_hba_info( - struct se_hba *hba, - char *page) + +static inline struct se_hba *to_hba(struct config_item *item) { - return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", - hba->hba_id, hba->transport->name, - TARGET_CORE_CONFIGFS_VERSION); + return container_of(to_config_group(item), struct se_hba, hba_group); } -SE_HBA_ATTR_RO(hba_info); +static ssize_t target_hba_info_show(struct config_item *item, char *page) +{ + struct se_hba *hba = to_hba(item); -static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba, - char *page) + return sprintf(page, "HBA Index: %d plugin: %s version: 
%s\n", + hba->hba_id, hba->backend->ops->name, + TARGET_CORE_VERSION); +} + +static ssize_t target_hba_mode_show(struct config_item *item, char *page) { + struct se_hba *hba = to_hba(item); int hba_mode = 0; if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) @@ -2674,20 +2948,20 @@ return sprintf(page, "%d\n", hba_mode); } -static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, - const char *page, size_t count) +static ssize_t target_hba_mode_store(struct config_item *item, + const char *page, size_t count) { - struct se_subsystem_api *transport = hba->transport; + struct se_hba *hba = to_hba(item); unsigned long mode_flag; int ret; - if (transport->pmode_enable_hba == NULL) + if (hba->backend->ops->pmode_enable_hba == NULL) return -EINVAL; - ret = strict_strtoul(page, 0, &mode_flag); + ret = kstrtoul(page, 0, &mode_flag); if (ret < 0) { pr_err("Unable to extract hba mode flag: %d\n", ret); - return -EINVAL; + return ret; } if (hba->dev_count) { @@ -2695,7 +2969,7 @@ return -EINVAL; } - ret = transport->pmode_enable_hba(hba, mode_flag); + ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag); if (ret < 0) return -EINVAL; if (ret > 0) @@ -2706,9 +2980,8 @@ return count; } -SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); - -CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); +CONFIGFS_ATTR_RO(target_, hba_info); +CONFIGFS_ATTR(target_, hba_mode); static void target_core_hba_release(struct config_item *item) { @@ -2718,15 +2991,13 @@ } static struct configfs_attribute *target_core_hba_attrs[] = { - &target_core_hba_hba_info.attr, - &target_core_hba_hba_mode.attr, + &target_attr_hba_info, + &target_attr_hba_mode, NULL, }; static struct configfs_item_operations target_core_hba_item_ops = { .release = target_core_hba_release, - .show_attribute = target_core_hba_attr_show, - .store_attribute = target_core_hba_attr_store, }; static struct config_item_type target_core_hba_cit = { @@ -2775,11 +3046,11 @@ str++; /* Skip to start of plugin dependent ID */ } - ret = strict_strtoul(str, 0, &plugin_dep_id); + ret = kstrtoul(str, 0, &plugin_dep_id); if (ret < 0) { - pr_err("strict_strtoul() returned %d for" + pr_err("kstrtoul() returned %d for" " plugin_dep_id\n", ret); - return ERR_PTR(-EINVAL); + return ERR_PTR(ret); } /* * Load up TCM subsystem plugins if they have not already been loaded. @@ -2821,11 +3092,21 @@ /* Stop functions for struct config_item_type target_core_hba_cit */ +void target_setup_backend_cits(struct target_backend *tb) +{ + target_core_setup_dev_cit(tb); + target_core_setup_dev_attrib_cit(tb); + target_core_setup_dev_pr_cit(tb); + target_core_setup_dev_wwn_cit(tb); + target_core_setup_dev_alua_tg_pt_gps_cit(tb); + target_core_setup_dev_stat_cit(tb); +} + static int __init target_core_init_configfs(void) { struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *lu_gp_cg = NULL; - struct configfs_subsystem *subsys; + struct configfs_subsystem *subsys = &target_core_fabrics; struct t10_alua_lu_gp *lu_gp; int ret; @@ -2833,7 +3114,6 @@ " Engine: %s on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); - subsys = target_core_subsystem[0]; config_group_init(&subsys->su_group); mutex_init(&subsys->su_mutex); @@ -2845,7 +3125,7 @@ * and ALUA Logical Unit Group and Target Port Group infrastructure. 
*/ target_cg = &subsys->su_group; - target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2, + target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!target_cg->default_groups) { pr_err("Unable to allocate target_cg->default_groups\n"); @@ -2922,7 +3202,7 @@ goto out_global; } pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" - " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" + " Infrastructure: "TARGET_CORE_VERSION" on %s/%s" " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); /* * Register built-in RAMDISK subsystem logic for virtual LUN 0 @@ -2935,6 +3215,10 @@ if (ret < 0) goto out; + ret = target_xcopy_setup_pt(); + if (ret < 0) + goto out; + return 0; out: @@ -2959,13 +3243,10 @@ static void __exit target_core_exit_configfs(void) { - struct configfs_subsystem *subsys; struct config_group *hba_cg, *alua_cg, *lu_gp_cg; struct config_item *item; int i; - subsys = target_core_subsystem[0]; - lu_gp_cg = &alua_lu_gps_group; for (i = 0; lu_gp_cg->default_groups[i]; i++) { item = &lu_gp_cg->default_groups[i]->cg_item; @@ -2996,8 +3277,8 @@ * We expect subsys->su_group.default_groups to be released * by configfs subsystem provider logic.. */ - configfs_unregister_subsystem(subsys); - kfree(subsys->su_group.default_groups); + configfs_unregister_subsystem(&target_core_fabrics); + kfree(target_core_fabrics.su_group.default_groups); core_alua_free_lu_gp(default_lu_gp); default_lu_gp = NULL; @@ -3007,6 +3288,7 @@ core_dev_release_virtual_lun0(); rd_module_exit(); + target_xcopy_release_pt(); release_se_kmem_caches(); }
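
A few of the conversion patterns in this part of the patch, sketched in isolation. Throughout, the old per-type attribute structs and show_attribute/store_attribute dispatch are replaced by the generic CONFIGFS_ATTR(), CONFIGFS_ATTR_RO() and CONFIGFS_ATTR_WO() macros plus small container_of() helpers (to_device(), to_lu_gp(), to_tg_pt_gp()) that recover the owning object from the config_item. A minimal sketch of the resulting idiom, assuming a hypothetical demo_item object (the macros and configfs calls are real; everything named demo_* is illustrative):

#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/module.h>

struct demo_item {
	struct config_group group;	/* embeds the configfs object */
	int value;
};

static inline struct demo_item *to_demo(struct config_item *item)
{
	/* same container_of() pattern as to_device()/to_tg_pt_gp() above */
	return container_of(to_config_group(item), struct demo_item, group);
}

static ssize_t demo_value_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_demo(item)->value);
}

static ssize_t demo_value_store(struct config_item *item,
				const char *page, size_t count)
{
	int ret = kstrtoint(page, 0, &to_demo(item)->value);

	return ret ? ret : count;
}

/*
 * Emits "static struct configfs_attribute demo_attr_value", wired to the
 * _show/_store pair purely by name; CONFIGFS_ATTR_RO()/_WO() emit the
 * read-only and write-only variants used for "info" and "control" above.
 */
CONFIGFS_ATTR(demo_, value);

static struct configfs_attribute *demo_attrs[] = {
	&demo_attr_value,
	NULL,	/* configfs expects a NULL-terminated array */
};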
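
The res_aptpl_metadata option parser at the top of this section now checks every match_int() return instead of discarding it, widens mapped_lun/target_lun from u32 to u64, and passes args directly to match_strdup() rather than &args[0]. A hedged sketch of that checked-parsing shape, with hypothetical option names:

#include <linux/parser.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

enum { Opt_demo_lun, Opt_demo_fabric, Opt_demo_err };

static const match_table_t demo_tokens = {
	{Opt_demo_lun,    "lun=%d"},
	{Opt_demo_fabric, "fabric=%s"},
	{Opt_demo_err,    NULL}
};

static int demo_parse_opts(char *opts, u64 *lun, char **fabric)
{
	substring_t args[MAX_OPT_ARGS];
	char *ptr;
	int ret, arg;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		switch (match_token(ptr, demo_tokens, args)) {
		case Opt_demo_lun:
			ret = match_int(args, &arg);
			if (ret)	/* was silently ignored pre-patch */
				return ret;
			*lun = (u64)arg;	/* LUN fields are u64 post-patch */
			break;
		case Opt_demo_fabric:
			*fabric = match_strdup(args);
			if (!*fabric)
				return -ENOMEM;
			break;
		default:
			break;
		}
	}
	return 0;
}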
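
target_dev_lba_map_store() accepts a small line-oriented grammar, matching what target_dev_lba_map_show() prints: a header line of segment size and segment multiplier, then one line per LBA range of the form "first_lba last_lba pgid:state ...", where state is O (active/optimized), A (active/non-optimized), S (standby) or U (unavailable), and every range line must name the same number of port groups. An illustrative table the parser would accept:

	64 1024
	0 1023 1:O 2:A
	1024 2047 1:A 2:O

This would be written to the device's lba_map attribute, e.g. under /sys/kernel/config/target/core/<hba>/<dev>/lba_map (the exact path depends on the configured HBA and device names). Note the parser frees the partially built list and returns the error as the write result on any malformed line, so a bad table never replaces the device's existing map.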
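
ALUA_SUPPORTED_STATE_ATTR() stamps out one show/store pair per bit of tg_pt_gp_alua_supported_states rather than hand-writing seven near-identical function pairs. The same technique in miniature, self-contained and with illustrative demo_tg_* names (the kstrtoul()-then-range-check shape mirrors the patch; the bit positions are made up):

#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/module.h>

struct demo_tg {
	struct config_group group;
	unsigned long mask;	/* stand-in for the supported-states bits */
};

static inline struct demo_tg *to_demo_tg(struct config_item *item)
{
	return container_of(to_config_group(item), struct demo_tg, group);
}

#define DEMO_MASK_BIT_ATTR(_name, _bit)					\
static ssize_t demo_tg_##_name##_show(struct config_item *item,		\
				      char *page)			\
{									\
	return sprintf(page, "%d\n",					\
		       !!(to_demo_tg(item)->mask & (_bit)));		\
}									\
									\
static ssize_t demo_tg_##_name##_store(struct config_item *item,	\
				       const char *page, size_t count)	\
{									\
	unsigned long tmp;						\
	int ret = kstrtoul(page, 0, &tmp);				\
									\
	if (ret < 0)							\
		return ret;						\
	if (tmp > 1)	/* only "0" and "1" are meaningful */		\
		return -EINVAL;						\
	if (tmp)							\
		to_demo_tg(item)->mask |= (_bit);			\
	else								\
		to_demo_tg(item)->mask &= ~(_bit);			\
	return count;							\
}									\
CONFIGFS_ATTR(demo_tg_, _name)

/* Each invocation emits demo_tg_attr_standby etc. for the attrs[] table */
DEMO_MASK_BIT_ATTR(standby, 1UL << 5);
DEMO_MASK_BIT_ATTR(unavailable, 1UL << 3);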
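
target_setup_backend_cits() ties the TB_CIT_SETUP()-generated initializers together so that each struct target_backend carries its own tb_dev_cit, tb_dev_pr_cit, tb_dev_stat_cit and friends, letting ct_owner pin the backend module instead of target_core_mod. A sketch of how the make-device path consumes those per-backend types, assuming the target core internal headers for struct target_backend and struct se_device (field names follow the patch; the demo function itself is hypothetical):

/* assumes target_core_internal.h / target_core_base.h for the types */
static void demo_init_dev_groups(struct target_backend *tb,
				 struct se_device *dev, const char *name)
{
	/*
	 * Each backend owns its cit instances, initialized once by
	 * target_setup_backend_cits() at backend registration time,
	 * instead of sharing file-scope target_core_dev_*_cit objects.
	 */
	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
	config_group_init_type_name(&dev->dev_pr_group, "pr",
				    &tb->tb_dev_pr_cit);
	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
				    "statistics", &tb->tb_dev_stat_cit);
}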
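
Finally, note the one-word allocation fix in target_core_init_configfs(): default_groups stores struct config_group * pointers, so the element size must be the pointer, not the struct. The old sizeof(struct config_group) merely over-allocated, but sizing the array by its element type keeps the intent honest. A sketch of the idiomatic form (the helper is hypothetical):

#include <linux/configfs.h>
#include <linux/slab.h>

static struct config_group **demo_alloc_default_groups(unsigned int n)
{
	/* n real entries plus the NULL terminator configfs walks to */
	return kcalloc(n + 1, sizeof(struct config_group *), GFP_KERNEL);
}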