--- zzzz-none-000/linux-3.10.107/drivers/rapidio/rio.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/rapidio/rio.c	2021-02-04 17:41:59.000000000 +0000
@@ -5,9 +5,8 @@
  * Copyright 2005 MontaVista Software, Inc.
  * Matt Porter 
  *
- * Copyright 2009 Integrated Device Technology, Inc.
+ * Copyright 2009 - 2013 Integrated Device Technology, Inc.
  * Alex Bounine 
- * - Added Port-Write/Error Management initialization and handling
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -31,10 +30,22 @@
 
 #include "rio.h"
 
+MODULE_DESCRIPTION("RapidIO Subsystem Core");
+MODULE_AUTHOR("Matt Porter ");
+MODULE_AUTHOR("Alexandre Bounine ");
+MODULE_LICENSE("GPL");
+
+static int hdid[RIO_MAX_MPORTS];
+static int ids_num;
+module_param_array(hdid, int, &ids_num, 0);
+MODULE_PARM_DESC(hdid,
+	"Destination ID assignment to local RapidIO controllers");
+
 static LIST_HEAD(rio_devices);
 static DEFINE_SPINLOCK(rio_global_list_lock);
 
 static LIST_HEAD(rio_mports);
+static LIST_HEAD(rio_scans);
 static DEFINE_MUTEX(rio_mport_list_lock);
 static unsigned char next_portid;
 static DEFINE_SPINLOCK(rio_mmap_lock);
@@ -580,44 +591,6 @@
 EXPORT_SYMBOL_GPL(rio_set_port_lockout);
 
 /**
- * rio_switch_init - Sets switch operations for a particular vendor switch
- * @rdev: RIO device
- * @do_enum: Enumeration/Discovery mode flag
- *
- * Searches the RIO switch ops table for known switch types. If the vid
- * and did match a switch table entry, then call switch initialization
- * routine to setup switch-specific routines.
- */
-void rio_switch_init(struct rio_dev *rdev, int do_enum)
-{
-	struct rio_switch_ops *cur = __start_rio_switch_ops;
-	struct rio_switch_ops *end = __end_rio_switch_ops;
-
-	while (cur < end) {
-		if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
-			pr_debug("RIO: calling init routine for %s\n",
-				 rio_name(rdev));
-			cur->init_hook(rdev, do_enum);
-			break;
-		}
-		cur++;
-	}
-
-	if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
-		pr_debug("RIO: adding STD routing ops for %s\n",
-			 rio_name(rdev));
-		rdev->rswitch->add_entry = rio_std_route_add_entry;
-		rdev->rswitch->get_entry = rio_std_route_get_entry;
-		rdev->rswitch->clr_table = rio_std_route_clr_table;
-	}
-
-	if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
-		printk(KERN_ERR "RIO: missing routing ops for %s\n",
-		       rio_name(rdev));
-}
-EXPORT_SYMBOL_GPL(rio_switch_init);
-
-/**
  * rio_enable_rx_tx_port - enable input receiver and output transmitter of
  * given port
  * @port: Master port associated with the RIO network
@@ -970,8 +943,8 @@
 	/*
 	 * Process the port-write notification from switch
 	 */
-	if (rdev->rswitch->em_handle)
-		rdev->rswitch->em_handle(rdev, portnum);
+	if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle)
+		rdev->rswitch->ops->em_handle(rdev, portnum);
 
 	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
@@ -1207,8 +1180,9 @@
  * @route_destid: destID entry in the RT
  * @route_port: destination port for specified destID
  */
-int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
-		u16 table, u16 route_destid, u8 route_port)
+static int
+rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table, u16 route_destid, u8 route_port)
 {
 	if (table == RIO_GLOBAL_TABLE) {
 		rio_mport_write_config_32(mport, destid, hopcount,
@@ -1234,8 +1208,9 @@
  * @route_destid: destID entry in the RT
  * @route_port: returned destination port for specified destID
  */
-int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
-		u16 table, u16 route_destid, u8 *route_port)
+static int
+rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table, u16 route_destid, u8 *route_port)
 {
 	u32 result;
 
@@ -1259,8 +1234,9 @@
  * @hopcount: Number of switch hops to the device
  * @table: routing table ID (global or port-specific)
  */
-int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
-		u16 table)
+static int
+rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table)
 {
 	u32 max_destid = 0xff;
 	u32 i, pef, id_inc = 1, ext_cfg = 0;
@@ -1301,34 +1277,271 @@
 	return 0;
 }
 
+/**
+ * rio_lock_device - Acquires host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ * @wait_ms: Max wait time in msec (0 = no timeout)
+ *
+ * Attempts to acquire host device lock for specified device.
+ * Returns 0 if device lock acquired or EINVAL if timeout expires.
+ */
+int rio_lock_device(struct rio_mport *port, u16 destid,
+		    u8 hopcount, int wait_ms)
+{
+	u32 result;
+	int tcnt = 0;
+
+	/* Attempt to acquire device lock */
+	rio_mport_write_config_32(port, destid, hopcount,
+				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+				 RIO_HOST_DID_LOCK_CSR, &result);
+
+	while (result != port->host_deviceid) {
+		if (wait_ms != 0 && tcnt == wait_ms) {
+			pr_debug("RIO: timeout when locking device %x:%x\n",
+				destid, hopcount);
+			return -EINVAL;
+		}
+
+		/* Delay a bit */
+		mdelay(1);
+		tcnt++;
+		/* Try to acquire device lock again */
+		rio_mport_write_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR,
+			port->host_deviceid);
+		rio_mport_read_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR, &result);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_lock_device);
+
+/**
+ * rio_unlock_device - Releases host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ *
+ * Returns 0 if device lock released or EINVAL if fails.
+ */
+int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
+{
+	u32 result;
+
+	/* Release device lock */
+	rio_mport_write_config_32(port, destid,
+				  hopcount,
+				  RIO_HOST_DID_LOCK_CSR,
+				  port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+		RIO_HOST_DID_LOCK_CSR, &result);
+	if ((result & 0xffff) != 0xffff) {
+		pr_debug("RIO: badness when releasing device lock %x:%x\n",
+			 destid, hopcount);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_unlock_device);
+
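The lock/unlock pair above wraps the handshake on the Host Base Device ID Lock CSR that enumeration code uses to claim a remote device or switch. A minimal usage sketch follows; it is not part of the patch, and example_locked_update() and its arguments are hypothetical:

/* Hypothetical sketch: perform maintenance accesses while holding the
 * host device lock, using only rio_lock_device()/rio_unlock_device()
 * as introduced above.
 */
static int example_locked_update(struct rio_mport *mport, u16 destid,
				 u8 hopcount)
{
	int rc;

	/* wait up to 1000 ms for the Host Base Device ID lock */
	rc = rio_lock_device(mport, destid, hopcount, 1000);
	if (rc)
		return rc;	/* -EINVAL on timeout */

	/* ... maintenance reads/writes on the locked device go here ... */

	return rio_unlock_device(mport, destid, hopcount);
}
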
+/**
+ * rio_route_add_entry- Add a route entry to a switch routing table
+ * @rdev: RIO device
+ * @table: Routing table ID
+ * @route_destid: Destination ID to be routed
+ * @route_port: Port number to be routed
+ * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock)
+ *
+ * If available calls the switch specific add_entry() method to add a route
+ * entry into a switch routing table. Otherwise uses standard RT update method
+ * as defined by RapidIO specification. A specific routing table can be selected
+ * using the @table argument if a switch has per port routing tables or
+ * the standard (or global) table may be used by passing
+ * %RIO_GLOBAL_TABLE in @table.
+ *
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int rio_route_add_entry(struct rio_dev *rdev,
+			u16 table, u16 route_destid, u8 route_port, int lock)
+{
+	int rc = -EINVAL;
+	struct rio_switch_ops *ops = rdev->rswitch->ops;
+
+	if (lock) {
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock(&rdev->rswitch->lock);
+
+	if (ops == NULL || ops->add_entry == NULL) {
+		rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid,
+					     rdev->hopcount, table,
+					     route_destid, route_port);
+	} else if (try_module_get(ops->owner)) {
+		rc = ops->add_entry(rdev->net->hport, rdev->destid,
+				    rdev->hopcount, table, route_destid,
+				    route_port);
+		module_put(ops->owner);
+	}
+
+	spin_unlock(&rdev->rswitch->lock);
+
+	if (lock)
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_route_add_entry);
+
+/**
+ * rio_route_get_entry- Read an entry from a switch routing table
+ * @rdev: RIO device
+ * @table: Routing table ID
+ * @route_destid: Destination ID to be routed
+ * @route_port: Pointer to read port number into
+ * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock)
+ *
+ * If available calls the switch specific get_entry() method to fetch a route
+ * entry from a switch routing table. Otherwise uses standard RT read method
+ * as defined by RapidIO specification. A specific routing table can be selected
+ * using the @table argument if a switch has per port routing tables or
+ * the standard (or global) table may be used by passing
+ * %RIO_GLOBAL_TABLE in @table.
+ *
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int rio_route_get_entry(struct rio_dev *rdev, u16 table,
+			u16 route_destid, u8 *route_port, int lock)
+{
+	int rc = -EINVAL;
+	struct rio_switch_ops *ops = rdev->rswitch->ops;
+
+	if (lock) {
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock(&rdev->rswitch->lock);
+
+	if (ops == NULL || ops->get_entry == NULL) {
+		rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid,
+					     rdev->hopcount, table,
+					     route_destid, route_port);
+	} else if (try_module_get(ops->owner)) {
+		rc = ops->get_entry(rdev->net->hport, rdev->destid,
+				    rdev->hopcount, table, route_destid,
+				    route_port);
+		module_put(ops->owner);
+	}
+
+	spin_unlock(&rdev->rswitch->lock);
+
+	if (lock)
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_route_get_entry);
+
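With these wrappers, callers no longer reach into rdev->rswitch method pointers directly; the routing-table spinlock and the optional hardware device lock are handled inside. A hedged sketch of programming and reading back one global-table entry (the function name and destination ID below are made up for illustration):

/* Hypothetical sketch: route destination ID 0x0005 out of switch port 2
 * and read the entry back. Uses only rio_route_add_entry() and
 * rio_route_get_entry() as defined above; lock=1 takes the host device
 * lock around each routing table access.
 */
static int example_program_route(struct rio_dev *rsw_dev)
{
	u8 route_port;
	int rc;

	rc = rio_route_add_entry(rsw_dev, RIO_GLOBAL_TABLE, 0x0005, 2, 1);
	if (rc)
		return rc;

	rc = rio_route_get_entry(rsw_dev, RIO_GLOBAL_TABLE, 0x0005,
				 &route_port, 1);
	if (!rc)
		pr_debug("RIO: destid 0x0005 routed via port %u\n", route_port);

	return rc;
}
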
+/**
+ * rio_route_clr_table - Clear a switch routing table
+ * @rdev: RIO device
+ * @table: Routing table ID
+ * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock)
+ *
+ * If available calls the switch specific clr_table() method to clear a switch
+ * routing table. Otherwise uses standard RT write method as defined by RapidIO
+ * specification. A specific routing table can be selected using the @table
+ * argument if a switch has per port routing tables or the standard (or global)
+ * table may be used by passing %RIO_GLOBAL_TABLE in @table.
+ *
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock)
+{
+	int rc = -EINVAL;
+	struct rio_switch_ops *ops = rdev->rswitch->ops;
+
+	if (lock) {
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock(&rdev->rswitch->lock);
+
+	if (ops == NULL || ops->clr_table == NULL) {
+		rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid,
+					     rdev->hopcount, table);
+	} else if (try_module_get(ops->owner)) {
+		rc = ops->clr_table(rdev->net->hport, rdev->destid,
+				    rdev->hopcount, table);
+
+		module_put(ops->owner);
+	}
+
+	spin_unlock(&rdev->rswitch->lock);
+
+	if (lock)
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_route_clr_table);
+
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 static bool rio_chan_filter(struct dma_chan *chan, void *arg)
 {
-	struct rio_dev *rdev = arg;
+	struct rio_mport *mport = arg;
 
 	/* Check that DMA device belongs to the right MPORT */
-	return (rdev->net->hport ==
-		container_of(chan->device, struct rio_mport, dma));
+	return mport == container_of(chan->device, struct rio_mport, dma);
 }
 
 /**
- * rio_request_dma - request RapidIO capable DMA channel that supports
- *   specified target RapidIO device.
- * @rdev: RIO device control structure
+ * rio_request_mport_dma - request RapidIO capable DMA channel associated
+ *   with specified local RapidIO mport device.
+ * @mport: RIO mport to perform DMA data transfers
  *
  * Returns pointer to allocated DMA channel or NULL if failed.
  */
-struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+struct dma_chan *rio_request_mport_dma(struct rio_mport *mport)
 {
 	dma_cap_mask_t mask;
-	struct dma_chan *dchan;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
-	dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+	return dma_request_channel(mask, rio_chan_filter, mport);
+}
+EXPORT_SYMBOL_GPL(rio_request_mport_dma);
 
-	return dchan;
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ *   specified target RapidIO device.
+ * @rdev: RIO device associated with DMA transfer
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+	return rio_request_mport_dma(rdev->net->hport);
 }
 EXPORT_SYMBOL_GPL(rio_request_dma);
 
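Because the channel filter now matches on the mport itself, a DMA channel can be requested either per mport or, as before, per target device. A short sketch, under the assumption that rio_release_dma() remains the existing wrapper around dma_release_channel():

/* Hypothetical sketch: acquire a DMA channel served by the given mport,
 * queue transfers on it (see rio_dma_prep_xfer() below) and release it.
 */
static int example_get_dma_chan(struct rio_mport *mport)
{
	struct dma_chan *dchan;

	dchan = rio_request_mport_dma(mport);
	if (!dchan)
		return -ENODEV;

	/* ... prepare and submit transfers on dchan ... */

	rio_release_dma(dchan);
	return 0;
}
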
@@ -1343,10 +1556,10 @@
 EXPORT_SYMBOL_GPL(rio_release_dma);
 
 /**
- * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * rio_dma_prep_xfer - RapidIO specific wrapper
  *   for device_prep_slave_sg callback defined by DMAENGINE.
- * @rdev: RIO device control structure
  * @dchan: DMA channel to configure
+ * @destid: target RapidIO device destination ID
  * @data: RIO specific data descriptor
  * @direction: DMA data transfer direction (TO or FROM the device)
  * @flags: dmaengine defined flags
@@ -1356,11 +1569,10 @@
  * target RIO device.
  * Returns pointer to DMA transaction descriptor or NULL if failed.
  */
-struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
-	struct dma_chan *dchan, struct rio_dma_data *data,
+struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
+	u16 destid, struct rio_dma_data *data,
 	enum dma_transfer_direction direction, unsigned long flags)
 {
-	struct dma_async_tx_descriptor *txd = NULL;
 	struct rio_dma_ext rio_ext;
 
 	if (dchan->device->device_prep_slave_sg == NULL) {
@@ -1368,15 +1580,35 @@
 		return NULL;
 	}
 
-	rio_ext.destid = rdev->destid;
+	rio_ext.destid = destid;
 	rio_ext.rio_addr_u = data->rio_addr_u;
 	rio_ext.rio_addr = data->rio_addr;
 	rio_ext.wr_type = data->wr_type;
 
-	txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
-				direction, flags, &rio_ext);
+	return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+				direction, flags, &rio_ext);
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
 
-	return txd;
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ *   for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+	struct dma_chan *dchan, struct rio_dma_data *data,
+	enum dma_transfer_direction direction, unsigned long flags)
+{
+	return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags);
 }
 EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
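rio_dma_prep_xfer() drops the struct rio_dev dependency, so a destination ID obtained by any other means can be used directly. A hedged sketch of one outbound transfer; scatterlist mapping, completion callbacks and the surrounding driver context are assumed to exist elsewhere:

/* Hypothetical sketch: queue a single robust NWRITE_R transfer to a
 * remote destination ID through the new rio_dma_prep_xfer() entry point.
 * The scatterlist is assumed to be already DMA-mapped.
 */
static int example_dma_write(struct dma_chan *dchan, u16 destid,
			     struct scatterlist *sgl, unsigned int sg_len,
			     u64 rio_addr)
{
	struct rio_dma_data tx_data = {
		.sg		= sgl,
		.sg_len		= sg_len,
		.rio_addr_u	= 0,
		.rio_addr	= rio_addr,
		.wr_type	= RDW_ALL_NWRITE_R,
	};
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = rio_dma_prep_xfer(dchan, destid, &tx_data, DMA_MEM_TO_DEV, 0);
	if (!txd)
		return -EIO;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(dchan);

	return dma_submit_error(cookie) ? -EIO : 0;
}
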
@@ -1410,34 +1642,73 @@
  * rio_register_scan - enumeration/discovery method registration interface
  * @mport_id: mport device ID for which fabric scan routine has to be set
  *            (RIO_MPORT_ANY = set for all available mports)
- * @scan_ops: enumeration/discovery control structure
+ * @scan_ops: enumeration/discovery operations structure
+ *
+ * Registers enumeration/discovery operations with RapidIO subsystem and
+ * attaches it to the specified mport device (or all available mports
+ * if RIO_MPORT_ANY is specified).
  *
- * Assigns enumeration or discovery method to the specified mport device (or all
- * available mports if RIO_MPORT_ANY is specified).
 * Returns error if the mport already has an enumerator attached to it.
- * In case of RIO_MPORT_ANY ignores ports with valid scan routines and returns
- * an error if was unable to find at least one available mport.
+ * In case of RIO_MPORT_ANY skips mports with valid scan routines (no error).
  */
 int rio_register_scan(int mport_id, struct rio_scan *scan_ops)
 {
 	struct rio_mport *port;
-	int rc = -EBUSY;
+	struct rio_scan_node *scan;
+	int rc = 0;
 
-	mutex_lock(&rio_mport_list_lock);
-	list_for_each_entry(port, &rio_mports, node) {
-		if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
-			if (port->nscan && mport_id == RIO_MPORT_ANY)
-				continue;
-			else if (port->nscan)
-				break;
+	pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id);
 
-			port->nscan = scan_ops;
-			rc = 0;
+	if ((mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) ||
+	    !scan_ops)
+		return -EINVAL;
 
-			if (mport_id != RIO_MPORT_ANY)
-				break;
+	mutex_lock(&rio_mport_list_lock);
+
+	/*
+	 * Check if there is another enumerator already registered for
+	 * the same mport ID (including RIO_MPORT_ANY). Multiple enumerators
+	 * for the same mport ID are not supported.
+	 */
+	list_for_each_entry(scan, &rio_scans, node) {
+		if (scan->mport_id == mport_id) {
+			rc = -EBUSY;
+			goto err_out;
 		}
 	}
+
+	/*
+	 * Allocate and initialize new scan registration node.
+	 */
+	scan = kzalloc(sizeof(*scan), GFP_KERNEL);
+	if (!scan) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	scan->mport_id = mport_id;
+	scan->ops = scan_ops;
+
+	/*
+	 * Traverse the list of registered mports to attach this new scan.
+	 *
+	 * The new scan with matching mport ID overrides any previously attached
+	 * scan assuming that old scan (if any) is the default one (based on the
+	 * enumerator registration check above).
+	 * If the new scan is the global one, it will be attached only to mports
+	 * that do not have their own individual operations already attached.
+	 */
+	list_for_each_entry(port, &rio_mports, node) {
+		if (port->id == mport_id) {
+			port->nscan = scan_ops;
+			break;
+		} else if (mport_id == RIO_MPORT_ANY && !port->nscan)
+			port->nscan = scan_ops;
+	}
+
+	list_add_tail(&scan->node, &rio_scans);
+
+err_out:
 	mutex_unlock(&rio_mport_list_lock);
 
 	return rc;
@@ -1447,30 +1718,83 @@
 /**
  * rio_unregister_scan - removes enumeration/discovery method from mport
  * @mport_id: mport device ID for which fabric scan routine has to be
- *            unregistered (RIO_MPORT_ANY = set for all available mports)
+ *            unregistered (RIO_MPORT_ANY = apply to all mports that use
+ *            the specified scan_ops)
+ * @scan_ops: enumeration/discovery operations structure
  *
 * Removes enumeration or discovery method assigned to the specified mport
- * device (or all available mports if RIO_MPORT_ANY is specified).
+ * device. If RIO_MPORT_ANY is specified, removes the specified operations from
+ * all mports that have them attached.
  */
-int rio_unregister_scan(int mport_id)
+int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
 {
 	struct rio_mport *port;
+	struct rio_scan_node *scan;
+
+	pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id);
+
+	if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS)
+		return -EINVAL;
 
 	mutex_lock(&rio_mport_list_lock);
-	list_for_each_entry(port, &rio_mports, node) {
-		if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
-			if (port->nscan)
-				port->nscan = NULL;
-			if (mport_id != RIO_MPORT_ANY)
-				break;
+
+	list_for_each_entry(port, &rio_mports, node)
+		if (port->id == mport_id ||
+		    (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
+			port->nscan = NULL;
+
+	list_for_each_entry(scan, &rio_scans, node) {
+		if (scan->mport_id == mport_id) {
+			list_del(&scan->node);
+			kfree(scan);
+			break;
 		}
 	}
+
 	mutex_unlock(&rio_mport_list_lock);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rio_unregister_scan);
 
+/**
+ * rio_mport_scan - execute enumeration/discovery on the specified mport
+ * @mport_id: number (ID) of mport device
+ */
+int rio_mport_scan(int mport_id)
+{
+	struct rio_mport *port = NULL;
+	int rc;
+
+	mutex_lock(&rio_mport_list_lock);
+	list_for_each_entry(port, &rio_mports, node) {
+		if (port->id == mport_id)
+			goto found;
+	}
+	mutex_unlock(&rio_mport_list_lock);
+	return -ENODEV;
+found:
+	if (!port->nscan) {
+		mutex_unlock(&rio_mport_list_lock);
+		return -EINVAL;
+	}
+
+	if (!try_module_get(port->nscan->owner)) {
+		mutex_unlock(&rio_mport_list_lock);
+		return -ENODEV;
+	}
+
+	mutex_unlock(&rio_mport_list_lock);
+
+	if (port->host_deviceid >= 0)
+		rc = port->nscan->enumerate(port, 0);
+	else
+		rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT);
+
+	module_put(port->nscan->owner);
+	return rc;
+}
+
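An enumerator now hands the core a struct rio_scan whose owner field lets rio_mport_scan() and the init path pin the providing module around enumerate()/discover() calls. A hedged sketch of how an enumerator module might register; the callbacks are reduced to stubs and all names are hypothetical (the real implementation lives in rio-scan):

/* Hypothetical sketch of an enumerator module using rio_register_scan()
 * and rio_unregister_scan() as defined above.
 */
static int my_enumerate(struct rio_mport *mport, u32 flags)
{
	/* ... walk the fabric, assign destination IDs, register devices ... */
	return 0;
}

static int my_discover(struct rio_mport *mport, u32 flags)
{
	/* ... wait for the enumerator and pick up the resulting devices ... */
	return 0;
}

static struct rio_scan my_scan_ops = {
	.owner		= THIS_MODULE,
	.enumerate	= my_enumerate,
	.discover	= my_discover,
};

static int __init my_scan_init(void)
{
	/* attach to every mport that has no dedicated scan operations yet */
	return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
}

static void __exit my_scan_exit(void)
{
	rio_unregister_scan(RIO_MPORT_ANY, &my_scan_ops);
}

module_init(my_scan_init);
module_exit(my_scan_exit);

A deferred, user-triggered scan of a single port can then go through rio_mport_scan(), which takes the same module reference before calling into the attached operations.
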
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
@@ -1499,7 +1823,10 @@
 	work = container_of(_work, struct rio_disc_work, work);
 	pr_debug("RIO: discovery work for mport %d %s\n",
 		 work->mport->id, work->mport->name);
-	work->mport->nscan->discover(work->mport, 0);
+	if (try_module_get(work->mport->nscan->owner)) {
+		work->mport->nscan->discover(work->mport, 0);
+		module_put(work->mport->nscan->owner);
+	}
 }
 
 int rio_init_mports(void)
@@ -1518,8 +1845,10 @@
 	mutex_lock(&rio_mport_list_lock);
 	list_for_each_entry(port, &rio_mports, node) {
 		if (port->host_deviceid >= 0) {
-			if (port->nscan)
+			if (port->nscan && try_module_get(port->nscan->owner)) {
 				port->nscan->enumerate(port, 0);
+				module_put(port->nscan->owner);
+			}
 		} else
 			n++;
 	}
@@ -1533,7 +1862,7 @@
 	 * for each of them. If the code below fails to allocate needed
 	 * resources, exit without error to keep results of enumeration
 	 * process (if any).
-	 * TODO: Implement restart of dicovery process for all or
+	 * TODO: Implement restart of discovery process for all or
 	 * individual discovering mports.
 	 */
 	rio_wq = alloc_workqueue("riodisc", 0, 0);
@@ -1559,9 +1888,9 @@
 			n++;
 		}
 	}
-	mutex_unlock(&rio_mport_list_lock);
 
 	flush_workqueue(rio_wq);
+	mutex_unlock(&rio_mport_list_lock);
 	pr_debug("RIO: destroy discovery workqueue\n");
 	destroy_workqueue(rio_wq);
 	kfree(work);
@@ -1572,26 +1901,19 @@
 	return 0;
 }
 
-static int hdids[RIO_MAX_MPORTS + 1];
-
 static int rio_get_hdid(int index)
 {
-	if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS)
+	if (ids_num == 0 || ids_num <= index || index >= RIO_MAX_MPORTS)
 		return -1;
 
-	return hdids[index + 1];
-}
-
-static int rio_hdid_setup(char *str)
-{
-	(void)get_options(str, ARRAY_SIZE(hdids), hdids);
-	return 1;
+	return hdid[index];
 }
 
-__setup("riohdid=", rio_hdid_setup);
-
 int rio_register_mport(struct rio_mport *port)
 {
+	struct rio_scan_node *scan = NULL;
+	int res = 0;
+
 	if (next_portid >= RIO_MAX_MPORTS) {
 		pr_err("RIO: reached specified max number of mports\n");
 		return 1;
@@ -1600,11 +1922,38 @@
 	port->id = next_portid++;
 	port->host_deviceid = rio_get_hdid(port->id);
 	port->nscan = NULL;
+
+	dev_set_name(&port->dev, "rapidio%d", port->id);
+	port->dev.class = &rio_mport_class;
+
+	res = device_register(&port->dev);
+	if (res)
+		dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+			port->id, res);
+	else
+		dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id);
+
 	mutex_lock(&rio_mport_list_lock);
 	list_add_tail(&port->node, &rio_mports);
+
+	/*
+	 * Check if there are any registered enumeration/discovery operations
+	 * that have to be attached to the added mport.
+	 */
+	list_for_each_entry(scan, &rio_scans, node) {
+		if (port->id == scan->mport_id ||
+		    scan->mport_id == RIO_MPORT_ANY) {
+			port->nscan = scan->ops;
+			if (port->id == scan->mport_id)
+				break;
+		}
+	}
 	mutex_unlock(&rio_mport_list_lock);
+
+	pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_register_mport);
 
 EXPORT_SYMBOL_GPL(rio_local_get_device_id);
 EXPORT_SYMBOL_GPL(rio_get_device);
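Two practical consequences of the tail of this patch are worth spelling out. Host destination IDs now come from the hdid module parameter array declared at the top of the file instead of the removed riohdid= boot option, so (assuming the core keeps its usual rapidio module name) the assignment becomes rapidio.hdid=-1,5 on the kernel command line or hdid=-1,5 at module load time, one entry per mport. On the driver side, rio_register_mport() is now exported and also creates the class device and attaches any already-registered scan operations; a hedged sketch of how an mport driver might call it (all names below are hypothetical and the field setup is heavily abbreviated):

/* Hypothetical sketch of an mport driver handing a new master port to
 * the core. A real driver (e.g. tsi721) fills in ops, resources and
 * name before registering.
 */
static int example_mport_probe(struct device *parent)
{
	struct rio_mport *mport;

	mport = kzalloc(sizeof(*mport), GFP_KERNEL);
	if (!mport)
		return -ENOMEM;

	/* ... set mport->ops, mport->name, address ranges, etc. ... */
	mport->dev.parent = parent;

	/*
	 * rio_register_mport() assigns mport->id, picks host_deviceid from
	 * the hdid[] parameter, registers the rapidio%d class device and
	 * attaches any matching scan operations from rio_scans.
	 */
	if (rio_register_mport(mport)) {
		kfree(mport);
		return -ENODEV;
	}

	return 0;
}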