--- zzzz-none-000/linux-3.10.107/drivers/misc/mei/client.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/misc/mei/client.c 2021-02-04 17:41:59.000000000 +0000 @@ -14,10 +14,11 @@ * */ -#include #include #include #include +#include +#include #include @@ -26,68 +27,311 @@ #include "client.h" /** - * mei_me_cl_by_uuid - locate index of me client + * mei_me_cl_init - initialize me client + * + * @me_cl: me client + */ +void mei_me_cl_init(struct mei_me_client *me_cl) +{ + INIT_LIST_HEAD(&me_cl->list); + kref_init(&me_cl->refcnt); +} + +/** + * mei_me_cl_get - increases me client refcount + * + * @me_cl: me client + * + * Locking: called under "dev->device_lock" lock + * + * Return: me client or NULL + */ +struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl) +{ + if (me_cl && kref_get_unless_zero(&me_cl->refcnt)) + return me_cl; + + return NULL; +} + +/** + * mei_me_cl_release - free me client + * + * Locking: called under "dev->device_lock" lock + * + * @ref: me_client refcount + */ +static void mei_me_cl_release(struct kref *ref) +{ + struct mei_me_client *me_cl = + container_of(ref, struct mei_me_client, refcnt); + + kfree(me_cl); +} + +/** + * mei_me_cl_put - decrease me client refcount and free client if necessary + * + * Locking: called under "dev->device_lock" lock + * + * @me_cl: me client + */ +void mei_me_cl_put(struct mei_me_client *me_cl) +{ + if (me_cl) + kref_put(&me_cl->refcnt, mei_me_cl_release); +} + +/** + * __mei_me_cl_del - delete me client from the list and decrease + * reference counter + * + * @dev: mei device + * @me_cl: me client + * + * Locking: dev->me_clients_rwsem + */ +static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl) +{ + if (!me_cl) + return; + + list_del_init(&me_cl->list); + mei_me_cl_put(me_cl); +} + +/** + * mei_me_cl_del - delete me client from the list and decrease + * reference counter + * + * @dev: mei device + * @me_cl: me client + */ +void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl) +{ + down_write(&dev->me_clients_rwsem); + __mei_me_cl_del(dev, me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_me_cl_add - add me client to the list * * @dev: mei device - * returns me client index or -ENOENT if not found + * @me_cl: me client */ -int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) +void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl) { - int i, res = -ENOENT; + down_write(&dev->me_clients_rwsem); + list_add(&me_cl->list, &dev->me_clients); + up_write(&dev->me_clients_rwsem); +} - for (i = 0; i < dev->me_clients_num; ++i) - if (uuid_le_cmp(*uuid, - dev->me_clients[i].props.protocol_name) == 0) { - res = i; +/** + * __mei_me_cl_by_uuid - locate me client by uuid + * increases ref count + * + * @dev: mei device + * @uuid: me client uuid + * + * Return: me client or NULL if not found + * + * Locking: dev->me_clients_rwsem + */ +static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev, + const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + const uuid_le *pn; + + WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); + + list_for_each_entry(me_cl, &dev->me_clients, list) { + pn = &me_cl->props.protocol_name; + if (uuid_le_cmp(*uuid, *pn) == 0) + return mei_me_cl_get(me_cl); + } + + return NULL; +} + +/** + * mei_me_cl_by_uuid - locate me client by uuid + * increases ref count + * + * @dev: mei device + * @uuid: me client uuid + * + * Return: me client or NULL if not found + * + * Locking: 
dev->me_clients_rwsem + */ +struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev, + const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + + down_read(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, uuid); + up_read(&dev->me_clients_rwsem); + + return me_cl; +} + +/** + * mei_me_cl_by_id - locate me client by client id + * increases ref count + * + * @dev: the device structure + * @client_id: me client id + * + * Return: me client or NULL if not found + * + * Locking: dev->me_clients_rwsem + */ +struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) +{ + + struct mei_me_client *__me_cl, *me_cl = NULL; + + down_read(&dev->me_clients_rwsem); + list_for_each_entry(__me_cl, &dev->me_clients, list) { + if (__me_cl->client_id == client_id) { + me_cl = mei_me_cl_get(__me_cl); break; } + } + up_read(&dev->me_clients_rwsem); - return res; + return me_cl; } - /** - * mei_me_cl_by_id return index to me_clients for client_id + * __mei_me_cl_by_uuid_id - locate me client by client id and uuid + * increases ref count * * @dev: the device structure + * @uuid: me client uuid * @client_id: me client id * - * Locking: called under "dev->device_lock" lock + * Return: me client or null if not found * - * returns index on success, -ENOENT on failure. + * Locking: dev->me_clients_rwsem */ +static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev, + const uuid_le *uuid, u8 client_id) +{ + struct mei_me_client *me_cl; + const uuid_le *pn; + + WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); + + list_for_each_entry(me_cl, &dev->me_clients, list) { + pn = &me_cl->props.protocol_name; + if (uuid_le_cmp(*uuid, *pn) == 0 && + me_cl->client_id == client_id) + return mei_me_cl_get(me_cl); + } + + return NULL; +} + -int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) +/** + * mei_me_cl_by_uuid_id - locate me client by client id and uuid + * increases ref count + * + * @dev: the device structure + * @uuid: me client uuid + * @client_id: me client id + * + * Return: me client or null if not found + */ +struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, + const uuid_le *uuid, u8 client_id) { - int i; - for (i = 0; i < dev->me_clients_num; i++) - if (dev->me_clients[i].client_id == client_id) - break; - if (WARN_ON(dev->me_clients[i].client_id != client_id)) - return -ENOENT; + struct mei_me_client *me_cl; - if (i == dev->me_clients_num) - return -ENOENT; + down_read(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id); + up_read(&dev->me_clients_rwsem); - return i; + return me_cl; } +/** + * mei_me_cl_rm_by_uuid - remove all me clients matching uuid + * + * @dev: the device structure + * @uuid: me client uuid + * + * Locking: called under "dev->device_lock" lock + */ +void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) +{ + struct mei_me_client *me_cl; + + dev_dbg(dev->dev, "remove %pUl\n", uuid); + + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, uuid); + __mei_me_cl_del(dev, me_cl); + up_write(&dev->me_clients_rwsem); +} /** - * mei_io_list_flush - removes list entry belonging to cl. 
+ * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id * - * @list: An instance of our list structure - * @cl: host client + * @dev: the device structure + * @uuid: me client uuid + * @id: me client id + * + * Locking: called under "dev->device_lock" lock */ -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) { - struct mei_cl_cb *cb; - struct mei_cl_cb *next; + struct mei_me_client *me_cl; - list_for_each_entry_safe(cb, next, &list->list, list) { - if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) - list_del(&cb->list); - } + dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id); + + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); + __mei_me_cl_del(dev, me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_me_cl_rm_all - remove all me clients + * + * @dev: the device structure + * + * Locking: called under "dev->device_lock" lock + */ +void mei_me_cl_rm_all(struct mei_device *dev) +{ + struct mei_me_client *me_cl, *next; + + down_write(&dev->me_clients_rwsem); + list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) + __mei_me_cl_del(dev, me_cl); + up_write(&dev->me_clients_rwsem); +} + +/** + * mei_cl_cmp_id - tells if the clients are the same + * + * @cl1: host client 1 + * @cl2: host client 2 + * + * Return: true - if the clients has same host and me ids + * false - otherwise + */ +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, + const struct mei_cl *cl2) +{ + return cl1 && cl2 && + (cl1->host_client_id == cl2->host_client_id) && + (mei_cl_me_id(cl1) == mei_cl_me_id(cl2)); } /** @@ -100,20 +344,22 @@ if (cb == NULL) return; - kfree(cb->request_buffer.data); - kfree(cb->response_buffer.data); + list_del(&cb->list); + kfree(cb->buf.data); kfree(cb); } /** * mei_io_cb_init - allocate and initialize io callback * - * @cl - mei client + * @cl: mei client + * @type: operation type * @fp: pointer to file structure * - * returns mei_cl_cb pointer or NULL; + * Return: mei_cl_cb pointer or NULL; */ -struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) +struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, + struct file *fp) { struct mei_cl_cb *cb; @@ -121,25 +367,69 @@ if (!cb) return NULL; - mei_io_list_init(cb); - + INIT_LIST_HEAD(&cb->list); cb->file_object = fp; cb->cl = cl; cb->buf_idx = 0; + cb->fop_type = type; return cb; } /** - * mei_io_cb_alloc_req_buf - allocate request buffer + * __mei_io_list_flush - removes and frees cbs belonging to cl. + * + * @list: an instance of our list structure + * @cl: host client, can be NULL for flushing the whole list + * @free: whether to free the cbs + */ +static void __mei_io_list_flush(struct mei_cl_cb *list, + struct mei_cl *cl, bool free) +{ + struct mei_cl_cb *cb, *next; + + /* enable removing everything if no cl is specified */ + list_for_each_entry_safe(cb, next, &list->list, list) { + if (!cl || mei_cl_cmp_id(cl, cb->cl)) { + list_del_init(&cb->list); + if (free) + mei_io_cb_free(cb); + } + } +} + +/** + * mei_io_list_flush - removes list entry belonging to cl. 
+ * + * @list: An instance of our list structure + * @cl: host client + */ +void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, false); +} + +/** + * mei_io_list_free - removes cb belonging to cl and free them + * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, true); +} + +/** + * mei_io_cb_alloc_buf - allocate callback buffer * * @cb: io callback structure * @length: size of the buffer * - * returns 0 on success + * Return: 0 on success * -EINVAL if cb is NULL * -ENOMEM if allocation failed */ -int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) +int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length) { if (!cb) return -EINVAL; @@ -147,63 +437,114 @@ if (length == 0) return 0; - cb->request_buffer.data = kmalloc(length, GFP_KERNEL); - if (!cb->request_buffer.data) + cb->buf.data = kmalloc(length, GFP_KERNEL); + if (!cb->buf.data) return -ENOMEM; - cb->request_buffer.size = length; + cb->buf.size = length; return 0; } + /** - * mei_io_cb_alloc_resp_buf - allocate respose buffer + * mei_cl_alloc_cb - a convenient wrapper for allocating read cb * - * @cb: io callback structure + * @cl: host client * @length: size of the buffer + * @type: operation type + * @fp: associated file pointer (might be NULL) * - * returns 0 on success - * -EINVAL if cb is NULL - * -ENOMEM if allocation failed + * Return: cb on success and NULL on failure */ -int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) +struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, + enum mei_cb_file_ops type, struct file *fp) { + struct mei_cl_cb *cb; + + cb = mei_io_cb_init(cl, type, fp); if (!cb) - return -EINVAL; + return NULL; - if (length == 0) - return 0; + if (mei_io_cb_alloc_buf(cb, length)) { + mei_io_cb_free(cb); + return NULL; + } - cb->response_buffer.data = kmalloc(length, GFP_KERNEL); - if (!cb->response_buffer.data) - return -ENOMEM; - cb->response_buffer.size = length; - return 0; + return cb; } +/** + * mei_cl_read_cb - find this cl's callback in the read list + * for a specific file + * + * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL + * + * Return: cb on success, NULL if cb is not found + */ +struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp) +{ + struct mei_cl_cb *cb; + + list_for_each_entry(cb, &cl->rd_completed, list) + if (!fp || fp == cb->file_object) + return cb; + return NULL; +} + +/** + * mei_cl_read_cb_flush - free client's read pending and completed cbs + * for a specific file + * + * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL + */ +void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp) +{ + struct mei_cl_cb *cb, *next; + + list_for_each_entry_safe(cb, next, &cl->rd_completed, list) + if (!fp || fp == cb->file_object) + mei_io_cb_free(cb); + + + list_for_each_entry_safe(cb, next, &cl->rd_pending, list) + if (!fp || fp == cb->file_object) + mei_io_cb_free(cb); +} /** * mei_cl_flush_queues - flushes queue lists belonging to cl. * * @cl: host client + * @fp: file pointer (matching cb file object), may be NULL + * + * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. 
*/ -int mei_cl_flush_queues(struct mei_cl *cl) +int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) { + struct mei_device *dev; + if (WARN_ON(!cl || !cl->dev)) return -EINVAL; - dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n"); - mei_io_list_flush(&cl->dev->read_list, cl); - mei_io_list_flush(&cl->dev->write_list, cl); - mei_io_list_flush(&cl->dev->write_waiting_list, cl); + dev = cl->dev; + + cl_dbg(dev, cl, "remove list entry belonging to cl\n"); + mei_io_list_free(&cl->dev->write_list, cl); + mei_io_list_free(&cl->dev->write_waiting_list, cl); mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); + + mei_cl_read_cb_flush(cl, fp); + return 0; } /** - * mei_cl_init - initializes intialize cl. + * mei_cl_init - initializes cl. * * @cl: host client to be initialized * @dev: mei device @@ -214,10 +555,12 @@ init_waitqueue_head(&cl->wait); init_waitqueue_head(&cl->rx_wait); init_waitqueue_head(&cl->tx_wait); + init_waitqueue_head(&cl->ev_wait); + INIT_LIST_HEAD(&cl->rd_completed); + INIT_LIST_HEAD(&cl->rd_pending); INIT_LIST_HEAD(&cl->link); - INIT_LIST_HEAD(&cl->device_link); - cl->reading_state = MEI_IDLE; cl->writing_state = MEI_IDLE; + cl->state = MEI_FILE_INITIALIZING; cl->dev = dev; } @@ -225,7 +568,7 @@ * mei_cl_allocate - allocates cl structure and sets it up. * * @dev: mei device - * returns The allocated file or NULL on failure + * Return: The allocated file or NULL on failure */ struct mei_cl *mei_cl_allocate(struct mei_device *dev) { @@ -241,50 +584,40 @@ } /** - * mei_cl_find_read_cb - find this cl's callback in the read list + * mei_cl_link - allocate host id in the host map * * @cl: host client + * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one * - * returns cb on success, NULL on error - */ -struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) -{ - struct mei_device *dev = cl->dev; - struct mei_cl_cb *cb = NULL; - struct mei_cl_cb *next = NULL; - - list_for_each_entry_safe(cb, next, &dev->read_list.list, list) - if (mei_cl_cmp_id(cl, cb->cl)) - return cb; - return NULL; -} - -/** mei_cl_link: allocte host id in the host map - * - * @cl - host client - * @id - fixed host id or -1 for genereting one - * - * returns 0 on success + * Return: 0 on success * -EINVAL on incorrect values - * -ENONET if client not found + * -EMFILE if open count exceeded. 
*/ int mei_cl_link(struct mei_cl *cl, int id) { struct mei_device *dev; + long open_handle_count; if (WARN_ON(!cl || !cl->dev)) return -EINVAL; dev = cl->dev; - /* If Id is not asigned get one*/ + /* If Id is not assigned get one*/ if (id == MEI_HOST_CLIENT_ID_ANY) id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); if (id >= MEI_CLIENTS_MAX) { - dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; - return -ENOENT; + dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); + return -EMFILE; + } + + open_handle_count = dev->open_handle_count + dev->iamthif_open_count; + if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { + dev_err(dev->dev, "open_handle_count exceeded %d", + MEI_MAX_OPEN_HANDLE_COUNT); + return -EMFILE; } dev->open_handle_count++; @@ -296,19 +629,20 @@ cl->state = MEI_FILE_INITIALIZING; - dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id); + cl_dbg(dev, cl, "link cl\n"); return 0; } /** - * mei_cl_unlink - remove me_cl from the list + * mei_cl_unlink - remove host client from the list * * @cl: host client + * + * Return: always 0 */ int mei_cl_unlink(struct mei_cl *cl) { struct mei_device *dev; - struct mei_cl *pos, *next; /* don't shout on error exit path */ if (!cl) @@ -320,173 +654,640 @@ dev = cl->dev; - list_for_each_entry_safe(pos, next, &dev->file_list, link) { - if (cl->host_client_id == pos->host_client_id) { - dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n", - pos->host_client_id, pos->me_client_id); - list_del_init(&pos->link); - break; - } - } + cl_dbg(dev, cl, "unlink client"); + + if (dev->open_handle_count > 0) + dev->open_handle_count--; + + /* never clear the 0 bit */ + if (cl->host_client_id) + clear_bit(cl->host_client_id, dev->host_clients_map); + + list_del_init(&cl->link); + + cl->state = MEI_FILE_INITIALIZING; + return 0; } void mei_host_client_init(struct work_struct *work) { - struct mei_device *dev = container_of(work, - struct mei_device, init_work); - struct mei_client_properties *client_props; - int i; + struct mei_device *dev = + container_of(work, struct mei_device, init_work); + struct mei_me_client *me_cl; mutex_lock(&dev->device_lock); - bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); - dev->open_handle_count = 0; - /* - * Reserving the first three client IDs - * 0: Reserved for MEI Bus Message communications - * 1: Reserved for Watchdog - * 2: Reserved for AMTHI - */ - bitmap_set(dev->host_clients_map, 0, 3); - - for (i = 0; i < dev->me_clients_num; i++) { - client_props = &dev->me_clients[i].props; - - if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) - mei_amthif_host_init(dev); - else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) - mei_wd_host_init(dev); - else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid)) - mei_nfc_host_init(dev); + me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid); + if (me_cl) + mei_amthif_host_init(dev, me_cl); + mei_me_cl_put(me_cl); + + me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid); + if (me_cl) + mei_wd_host_init(dev, me_cl); + mei_me_cl_put(me_cl); + + dev->dev_state = MEI_DEV_ENABLED; + dev->reset_count = 0; + mutex_unlock(&dev->device_lock); + mei_cl_bus_rescan(dev); + + pm_runtime_mark_last_busy(dev->dev); + dev_dbg(dev->dev, "rpm: autosuspend\n"); + pm_request_autosuspend(dev->dev); +} + +/** + * mei_hbuf_acquire - try to acquire host buffer + * + * @dev: the device structure + * Return: true if host buffer was acquired + */ +bool mei_hbuf_acquire(struct mei_device *dev) +{ + if (mei_pg_state(dev) == 
MEI_PG_ON || + mei_pg_in_transition(dev)) { + dev_dbg(dev->dev, "device is in pg\n"); + return false; } - dev->dev_state = MEI_DEV_ENABLED; + if (!dev->hbuf_is_ready) { + dev_dbg(dev->dev, "hbuf is not ready\n"); + return false; + } - mutex_unlock(&dev->device_lock); + dev->hbuf_is_ready = false; + + return true; +} + +/** + * mei_cl_set_disconnected - set disconnected state and clear + * associated states and resources + * + * @cl: host client + */ +void mei_cl_set_disconnected(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + + if (cl->state == MEI_FILE_DISCONNECTED || + cl->state == MEI_FILE_INITIALIZING) + return; + + cl->state = MEI_FILE_DISCONNECTED; + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); + cl->mei_flow_ctrl_creds = 0; + cl->timer_count = 0; + + if (!cl->me_cl) + return; + + if (!WARN_ON(cl->me_cl->connect_count == 0)) + cl->me_cl->connect_count--; + + if (cl->me_cl->connect_count == 0) + cl->me_cl->mei_flow_ctrl_creds = 0; + + mei_me_cl_put(cl->me_cl); + cl->me_cl = NULL; +} + +static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl) +{ + if (!mei_me_cl_get(me_cl)) + return -ENOENT; + + /* only one connection is allowed for fixed address clients */ + if (me_cl->props.fixed_address) { + if (me_cl->connect_count) { + mei_me_cl_put(me_cl); + return -EBUSY; + } + } + + cl->me_cl = me_cl; + cl->state = MEI_FILE_CONNECTING; + cl->me_cl->connect_count++; + + return 0; +} + +/* + * mei_cl_send_disconnect - send disconnect request + * + * @cl: host client + * @cb: callback block + * + * Return: 0, OK; otherwise, error. + */ +static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev; + int ret; + + dev = cl->dev; + + ret = mei_hbm_cl_disconnect_req(dev, cl); + cl->status = ret; + if (ret) { + cl->state = MEI_FILE_DISCONNECT_REPLY; + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list.list); + cl->timer_count = MEI_CONNECT_TIMEOUT; + + return 0; +} + +/** + * mei_cl_irq_disconnect - processes close related operation from + * interrupt thread context - send disconnect request + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0, OK; otherwise, error. + */ +int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + + if (slots < msg_slots) + return -EMSGSIZE; + + ret = mei_cl_send_disconnect(cl, cb); + if (ret) + list_move_tail(&cb->list, &cmpl_list->list); + + return ret; } +/** + * __mei_cl_disconnect - disconnect host client from the me one + * internal function runtime pm has to be already acquired + * + * @cl: host client + * + * Return: 0 on success, <0 on failure. + */ +static int __mei_cl_disconnect(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + dev = cl->dev; + + cl->state = MEI_FILE_DISCONNECTING; + + cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL); + rets = cb ? 
0 : -ENOMEM; + if (rets) + goto out; + + cl_dbg(dev, cl, "add disconnect cb to control write list\n"); + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + + if (mei_hbuf_acquire(dev)) { + rets = mei_cl_send_disconnect(cl, cb); + if (rets) { + cl_err(dev, cl, "failed to disconnect.\n"); + goto out; + } + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY, + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + rets = cl->status; + if (cl->state != MEI_FILE_DISCONNECT_REPLY) { + cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); + rets = -ETIME; + } + +out: + /* we disconnect also on error */ + mei_cl_set_disconnected(cl); + if (!rets) + cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); + + mei_io_cb_free(cb); + return rets; +} /** - * mei_cl_disconnect - disconnect host clinet form the me one + * mei_cl_disconnect - disconnect host client from the me one * * @cl: host client * * Locking: called under "dev->device_lock" lock * - * returns 0 on success, <0 on failure. + * Return: 0 on success, <0 on failure. */ int mei_cl_disconnect(struct mei_cl *cl) { struct mei_device *dev; - struct mei_cl_cb *cb; - int rets, err; + int rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; dev = cl->dev; - if (cl->state != MEI_FILE_DISCONNECTING) + cl_dbg(dev, cl, "disconnecting"); + + if (!mei_cl_is_connected(cl)) return 0; - cb = mei_io_cb_init(cl, NULL); - if (!cb) - return -ENOMEM; + if (mei_cl_is_fixed_address(cl)) { + mei_cl_set_disconnected(cl); + return 0; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + rets = __mei_cl_disconnect(cl); + + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return rets; +} - cb->fop_type = MEI_FOP_CLOSE; - if (dev->hbuf_is_ready) { - dev->hbuf_is_ready = false; - if (mei_hbm_cl_disconnect_req(dev, cl)) { - rets = -ENODEV; - dev_err(&dev->pdev->dev, "failed to disconnect.\n"); - goto free; - } - cl->timer_count = MEI_CONNECT_TIMEOUT; - mdelay(10); /* Wait for hardware disconnection ready */ - list_add_tail(&cb->list, &dev->ctrl_rd_list.list); - } else { - dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n"); - list_add_tail(&cb->list, &dev->ctrl_wr_list.list); +/** + * mei_cl_is_other_connecting - checks if other + * client with the same me client id is connecting + * + * @cl: private data of the file object + * + * Return: true if other client is connected, false - otherwise. + */ +static bool mei_cl_is_other_connecting(struct mei_cl *cl) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + + dev = cl->dev; + + list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) { + if (cb->fop_type == MEI_FOP_CONNECT && + mei_cl_me_id(cl) == mei_cl_me_id(cb->cl)) + return true; } - mutex_unlock(&dev->device_lock); - err = wait_event_timeout(dev->wait_recvd_msg, - MEI_FILE_DISCONNECTED == cl->state, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + return false; +} + +/** + * mei_cl_send_connect - send connect request + * + * @cl: host client + * @cb: callback block + * + * Return: 0, OK; otherwise, error. 
+ */ +static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev; + int ret; + + dev = cl->dev; + + ret = mei_hbm_cl_connect_req(dev, cl); + cl->status = ret; + if (ret) { + cl->state = MEI_FILE_DISCONNECT_REPLY; + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list.list); + cl->timer_count = MEI_CONNECT_TIMEOUT; + return 0; +} + +/** + * mei_cl_irq_connect - send connect request in irq_thread context + * + * @cl: host client + * @cb: callback block + * @cmpl_list: complete list + * + * Return: 0, OK; otherwise, error. + */ +int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int rets; + + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + if (mei_cl_is_other_connecting(cl)) + return 0; + + if (slots < msg_slots) + return -EMSGSIZE; + + rets = mei_cl_send_connect(cl, cb); + if (rets) + list_move_tail(&cb->list, &cmpl_list->list); + + return rets; +} + +/** + * mei_cl_connect - connect host client to the me one + * + * @cl: host client + * @me_cl: me client + * @file: pointer to file structure + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. + */ +int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, + struct file *file) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + if (WARN_ON(!cl || !cl->dev || !me_cl)) + return -ENODEV; + + dev = cl->dev; + + rets = mei_cl_set_connecting(cl, me_cl); + if (rets) + return rets; + + if (mei_cl_is_fixed_address(cl)) { + cl->state = MEI_FILE_CONNECTED; + return 0; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + goto nortpm; + } + + cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file); + rets = cb ? 
0 : -ENOMEM; + if (rets) + goto out; + + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + + /* run hbuf acquire last so we don't have to undo */ + if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { + rets = mei_cl_send_connect(cl, cb); + if (rets) + goto out; + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECT_REQUIRED || + cl->state == MEI_FILE_DISCONNECT_REPLY), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); - if (MEI_FILE_DISCONNECTED == cl->state) { - rets = 0; - dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n"); - } else { - rets = -ENODEV; - if (MEI_FILE_DISCONNECTED != cl->state) - dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n"); - - if (err) - dev_dbg(&dev->pdev->dev, - "wait failed disconnect err=%08x\n", - err); - dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n"); + if (!mei_cl_is_connected(cl)) { + if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) { + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); + /* ignore disconnect return valuue; + * in case of failure reset will be invoked + */ + __mei_cl_disconnect(cl); + rets = -EFAULT; + goto out; + } + + /* timeout or something went really wrong */ + if (!cl->status) + cl->status = -EFAULT; } - mei_io_list_flush(&dev->ctrl_rd_list, cl); - mei_io_list_flush(&dev->ctrl_wr_list, cl); -free: + rets = cl->status; +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + mei_io_cb_free(cb); + +nortpm: + if (!mei_cl_is_connected(cl)) + mei_cl_set_disconnected(cl); + return rets; } +/** + * mei_cl_alloc_linked - allocate and link host client + * + * @dev: the device structure + * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one + * + * Return: cl on success ERR_PTR on failure + */ +struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id) +{ + struct mei_cl *cl; + int ret; + + cl = mei_cl_allocate(dev); + if (!cl) { + ret = -ENOMEM; + goto err; + } + + ret = mei_cl_link(cl, id); + if (ret) + goto err; + + return cl; +err: + kfree(cl); + return ERR_PTR(ret); +} + + /** - * mei_cl_is_other_connecting - checks if other - * client with the same me client id is connecting + * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. * * @cl: private data of the file object * - * returns ture if other client is connected, 0 - otherwise. + * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. */ -bool mei_cl_is_other_connecting(struct mei_cl *cl) +int mei_cl_flow_ctrl_creds(struct mei_cl *cl) { - struct mei_device *dev; - struct mei_cl *pos; - struct mei_cl *next; + int rets; - if (WARN_ON(!cl || !cl->dev)) - return false; + if (WARN_ON(!cl || !cl->me_cl)) + return -EINVAL; - dev = cl->dev; + if (cl->mei_flow_ctrl_creds > 0) + return 1; - list_for_each_entry_safe(pos, next, &dev->file_list, link) { - if ((pos->state == MEI_FILE_CONNECTING) && - (pos != cl) && cl->me_client_id == pos->me_client_id) - return true; + if (mei_cl_is_fixed_address(cl)) { + rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL); + if (rets && rets != -EBUSY) + return rets; + return 1; + } + if (mei_cl_is_single_recv_buf(cl)) { + if (cl->me_cl->mei_flow_ctrl_creds > 0) + return 1; } + return 0; +} - return false; +/** + * mei_cl_flow_ctrl_reduce - reduces flow_control. 
+ * + * @cl: private data of the file object + * + * Return: + * 0 on success + * -EINVAL when ctrl credits are <= 0 + */ +int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) +{ + if (WARN_ON(!cl || !cl->me_cl)) + return -EINVAL; + + if (mei_cl_is_fixed_address(cl)) + return 0; + + if (mei_cl_is_single_recv_buf(cl)) { + if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->me_cl->mei_flow_ctrl_creds--; + } else { + if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->mei_flow_ctrl_creds--; + } + return 0; +} + +/** + * mei_cl_notify_fop2req - convert fop to proper request + * + * @fop: client notification start response command + * + * Return: MEI_HBM_NOTIFICATION_START/STOP + */ +u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop) +{ + if (fop == MEI_FOP_NOTIFY_START) + return MEI_HBM_NOTIFICATION_START; + else + return MEI_HBM_NOTIFICATION_STOP; +} + +/** + * mei_cl_notify_req2fop - convert notification request top file operation type + * + * @req: hbm notification request type + * + * Return: MEI_FOP_NOTIFY_START/STOP + */ +enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req) +{ + if (req == MEI_HBM_NOTIFICATION_START) + return MEI_FOP_NOTIFY_START; + else + return MEI_FOP_NOTIFY_STOP; +} + +/** + * mei_cl_irq_notify - send notification request in irq_thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0 on such and error otherwise. + */ +int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + bool request; + + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + + if (slots < msg_slots) + return -EMSGSIZE; + + request = mei_cl_notify_fop2req(cb->fop_type); + ret = mei_hbm_cl_notify_req(dev, cl, request); + if (ret) { + cl->status = ret; + list_move_tail(&cb->list, &cmpl_list->list); + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list.list); + return 0; } /** - * mei_cl_connect - connect host clinet to the me one + * mei_cl_notify_request - send notification stop/start request * * @cl: host client + * @file: associate request with file + * @request: 1 for start or 0 for stop * * Locking: called under "dev->device_lock" lock * - * returns 0 on success, <0 on failure. + * Return: 0 on such and error otherwise. 
*/ -int mei_cl_connect(struct mei_cl *cl, struct file *file) +int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request) { struct mei_device *dev; struct mei_cl_cb *cb; - long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT); + enum mei_cb_file_ops fop_type; int rets; if (WARN_ON(!cl || !cl->dev)) @@ -494,211 +1295,309 @@ dev = cl->dev; - cb = mei_io_cb_init(cl, file); + if (!dev->hbm_f_ev_supported) { + cl_dbg(dev, cl, "notifications not supported\n"); + return -EOPNOTSUPP; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + fop_type = mei_cl_notify_req2fop(request); + cb = mei_io_cb_init(cl, fop_type, file); if (!cb) { rets = -ENOMEM; goto out; } - cb->fop_type = MEI_FOP_IOCTL; - - if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) { - dev->hbuf_is_ready = false; - - if (mei_hbm_cl_connect_req(dev, cl)) { + if (mei_hbuf_acquire(dev)) { + if (mei_hbm_cl_notify_req(dev, cl, request)) { rets = -ENODEV; goto out; } - cl->timer_count = MEI_CONNECT_TIMEOUT; list_add_tail(&cb->list, &dev->ctrl_rd_list.list); } else { - cl->state = MEI_FILE_INITIALIZING; list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } mutex_unlock(&dev->device_lock); - rets = wait_event_timeout(dev->wait_recvd_msg, - (cl->state == MEI_FILE_CONNECTED || - cl->state == MEI_FILE_DISCONNECTED), - timeout * HZ); + wait_event_timeout(cl->wait, cl->notify_en == request, + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); - if (cl->state != MEI_FILE_CONNECTED) { - rets = -EFAULT; - + if (cl->notify_en != request) { mei_io_list_flush(&dev->ctrl_rd_list, cl); mei_io_list_flush(&dev->ctrl_wr_list, cl); - goto out; + if (!cl->status) + cl->status = -EFAULT; } rets = cl->status; out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + mei_io_cb_free(cb); return rets; } /** - * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. + * mei_cl_notify - raise notification * - * @cl: private data of the file object + * @cl: host client * - * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. - * -ENOENT if mei_cl is not present - * -EINVAL if single_recv_buf == 0 + * Locking: called under "dev->device_lock" lock */ -int mei_cl_flow_ctrl_creds(struct mei_cl *cl) +void mei_cl_notify(struct mei_cl *cl) { struct mei_device *dev; - int i; - if (WARN_ON(!cl || !cl->dev)) - return -EINVAL; + if (!cl || !cl->dev) + return; dev = cl->dev; - if (!dev->me_clients_num) - return 0; + if (!cl->notify_en) + return; - if (cl->mei_flow_ctrl_creds > 0) - return 1; + cl_dbg(dev, cl, "notify event"); + cl->notify_ev = true; + wake_up_interruptible_all(&cl->ev_wait); - for (i = 0; i < dev->me_clients_num; i++) { - struct mei_me_client *me_cl = &dev->me_clients[i]; - if (me_cl->client_id == cl->me_client_id) { - if (me_cl->mei_flow_ctrl_creds) { - if (WARN_ON(me_cl->props.single_recv_buf == 0)) - return -EINVAL; - return 1; - } else { - return 0; - } - } - } - return -ENOENT; + if (cl->ev_async) + kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); + + mei_cl_bus_notify_event(cl); } /** - * mei_cl_flow_ctrl_reduce - reduces flow_control. 
+ * mei_cl_notify_get - get or wait for notification event * - * @cl: private data of the file object + * @cl: host client + * @block: this request is blocking + * @notify_ev: true if notification event was received * - * @returns - * 0 on success - * -ENOENT when me client is not found - * -EINVAL when ctrl credits are <= 0 + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on such and error otherwise. */ -int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) +int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev) { struct mei_device *dev; - int i; + int rets; + + *notify_ev = false; if (WARN_ON(!cl || !cl->dev)) - return -EINVAL; + return -ENODEV; dev = cl->dev; - if (!dev->me_clients_num) - return -ENOENT; + if (!mei_cl_is_connected(cl)) + return -ENODEV; - for (i = 0; i < dev->me_clients_num; i++) { - struct mei_me_client *me_cl = &dev->me_clients[i]; - if (me_cl->client_id == cl->me_client_id) { - if (me_cl->props.single_recv_buf != 0) { - if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) - return -EINVAL; - dev->me_clients[i].mei_flow_ctrl_creds--; - } else { - if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) - return -EINVAL; - cl->mei_flow_ctrl_creds--; - } - return 0; - } - } - return -ENOENT; + if (cl->notify_ev) + goto out; + + if (!block) + return -EAGAIN; + + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev); + mutex_lock(&dev->device_lock); + + if (rets < 0) + return rets; + +out: + *notify_ev = cl->notify_ev; + cl->notify_ev = false; + return 0; } /** * mei_cl_read_start - the start read client message function. * * @cl: host client + * @length: number of bytes to read + * @fp: pointer to file structure * - * returns 0 on success, <0 on failure. + * Return: 0 on success, <0 on failure. 
*/ -int mei_cl_read_start(struct mei_cl *cl, size_t length) +int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp) { struct mei_device *dev; struct mei_cl_cb *cb; int rets; - int i; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; dev = cl->dev; - if (cl->state != MEI_FILE_CONNECTED) - return -ENODEV; - - if (dev->dev_state != MEI_DEV_ENABLED) + if (!mei_cl_is_connected(cl)) return -ENODEV; - if (cl->read_cb) { - dev_dbg(&dev->pdev->dev, "read is pending.\n"); + /* HW currently supports only one pending read */ + if (!list_empty(&cl->rd_pending)) return -EBUSY; - } - i = mei_me_cl_by_id(dev, cl->me_client_id); - if (i < 0) { - dev_err(&dev->pdev->dev, "no such me client %d\n", - cl->me_client_id); - return -ENODEV; + + if (!mei_me_cl_is_active(cl->me_cl)) { + cl_err(dev, cl, "no such me client\n"); + return -ENOTTY; } - cb = mei_io_cb_init(cl, NULL); + /* always allocate at least client max message */ + length = max_t(size_t, length, mei_cl_mtu(cl)); + cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp); if (!cb) return -ENOMEM; - /* always allocate at least client max message */ - length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); - rets = mei_io_cb_alloc_resp_buf(cb, length); - if (rets) - goto err; + if (mei_cl_is_fixed_address(cl)) { + list_add_tail(&cb->list, &cl->rd_pending); + return 0; + } - cb->fop_type = MEI_FOP_READ; - if (dev->hbuf_is_ready) { - dev->hbuf_is_ready = false; - if (mei_hbm_cl_flow_control_req(dev, cl)) { - rets = -ENODEV; - goto err; - } - list_add_tail(&cb->list, &dev->read_list.list); + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + goto nortpm; + } + + if (mei_hbuf_acquire(dev)) { + rets = mei_hbm_cl_flow_control_req(dev, cl); + if (rets < 0) + goto out; + + list_add_tail(&cb->list, &cl->rd_pending); } else { + rets = 0; list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } - cl->read_cb = cb; +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); +nortpm: + if (rets) + mei_io_cb_free(cb); return rets; -err: - mei_io_cb_free(cb); - return rets; +} + +/** + * mei_cl_irq_write - write a message to device + * from the interrupt thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0, OK; otherwise error. + */ +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev; + struct mei_msg_data *buf; + struct mei_msg_hdr mei_hdr; + size_t len; + u32 msg_slots; + int slots; + int rets; + bool first_chunk; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + buf = &cb->buf; + + first_chunk = cb->buf_idx == 0; + + rets = first_chunk ? 
mei_cl_flow_ctrl_creds(cl) : 1; + if (rets < 0) + return rets; + + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + return 0; + } + + slots = mei_hbuf_empty_slots(dev); + len = buf->size - cb->buf_idx; + msg_slots = mei_data2slots(len); + + mei_hdr.host_addr = mei_cl_host_addr(cl); + mei_hdr.me_addr = mei_cl_me_id(cl); + mei_hdr.reserved = 0; + mei_hdr.internal = cb->internal; + + if (slots >= msg_slots) { + mei_hdr.length = len; + mei_hdr.msg_complete = 1; + /* Split the message only if we can write the whole host buffer */ + } else if (slots == dev->hbuf_depth) { + msg_slots = slots; + len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); + mei_hdr.length = len; + mei_hdr.msg_complete = 0; + } else { + /* wait for next time the host buffer is empty */ + return 0; + } + + cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", + cb->buf.size, cb->buf_idx); + + rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); + if (rets) { + cl->status = rets; + list_move_tail(&cb->list, &cmpl_list->list); + return rets; + } + + cl->status = 0; + cl->writing_state = MEI_WRITING; + cb->buf_idx += mei_hdr.length; + cb->completed = mei_hdr.msg_complete == 1; + + if (first_chunk) { + if (mei_cl_flow_ctrl_reduce(cl)) + return -EIO; + } + + if (mei_hdr.msg_complete) + list_move_tail(&cb->list, &dev->write_waiting_list.list); + + return 0; } /** * mei_cl_write - submit a write cb to mei device - assumes device_lock is locked + * assumes device_lock is locked * * @cl: host client - * @cl: write callback with filled data + * @cb: write callback with filled data + * @blocking: block until completed * - * returns numbe of bytes sent on success, <0 on failure. + * Return: number of bytes sent on success, <0 on failure. */ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) { struct mei_device *dev; struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; + int size; int rets; @@ -710,141 +1609,192 @@ dev = cl->dev; + buf = &cb->buf; + size = buf->size; - buf = &cb->request_buffer; + cl_dbg(dev, cl, "size=%d\n", size); - dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size); + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + cb->buf_idx = 0; + cl->writing_state = MEI_IDLE; - cb->fop_type = MEI_FOP_WRITE; + mei_hdr.host_addr = mei_cl_host_addr(cl); + mei_hdr.me_addr = mei_cl_me_id(cl); + mei_hdr.reserved = 0; + mei_hdr.msg_complete = 0; + mei_hdr.internal = cb->internal; rets = mei_cl_flow_ctrl_creds(cl); if (rets < 0) goto err; - /* Host buffer is not ready, we queue the request */ - if (rets == 0 || !dev->hbuf_is_ready) { - cb->buf_idx = 0; - /* unseting complete will enqueue the cb for write */ - mei_hdr.msg_complete = 0; - cl->writing_state = MEI_WRITING; - rets = buf->size; + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + rets = size; + goto out; + } + if (!mei_hbuf_acquire(dev)) { + cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); + rets = size; goto out; } - - dev->hbuf_is_ready = false; /* Check for a maximum length */ - if (buf->size > mei_hbuf_max_len(dev)) { + if (size > mei_hbuf_max_len(dev)) { mei_hdr.length = mei_hbuf_max_len(dev); mei_hdr.msg_complete = 0; } else { - mei_hdr.length = buf->size; + mei_hdr.length = size; mei_hdr.msg_complete = 1; } - mei_hdr.host_addr = cl->host_client_id; - mei_hdr.me_addr = cl->me_client_id; - mei_hdr.reserved = 0; - - 
dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n", - MEI_HDR_PRM(&mei_hdr)); - + rets = mei_write_message(dev, &mei_hdr, buf->data); + if (rets) + goto err; - if (mei_write_message(dev, &mei_hdr, buf->data)) { - rets = -EIO; + rets = mei_cl_flow_ctrl_reduce(cl); + if (rets) goto err; - } cl->writing_state = MEI_WRITING; cb->buf_idx = mei_hdr.length; + cb->completed = mei_hdr.msg_complete == 1; - rets = buf->size; out: - if (mei_hdr.msg_complete) { - if (mei_cl_flow_ctrl_reduce(cl)) { - rets = -ENODEV; - goto err; - } + if (mei_hdr.msg_complete) list_add_tail(&cb->list, &dev->write_waiting_list.list); - } else { + else list_add_tail(&cb->list, &dev->write_list.list); - } - + cb = NULL; if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { mutex_unlock(&dev->device_lock); - if (wait_event_interruptible(cl->tx_wait, - cl->writing_state == MEI_WRITE_COMPLETE)) { - if (signal_pending(current)) - rets = -EINTR; - else - rets = -ERESTARTSYS; - } + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE); mutex_lock(&dev->device_lock); + /* wait_event_interruptible returns -ERESTARTSYS */ + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto err; + } } + + rets = size; err: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return rets; } +/** + * mei_cl_complete - processes completed operation for a client + * + * @cl: private data of the file object. + * @cb: callback block. + */ +void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) +{ + struct mei_device *dev = cl->dev; + + switch (cb->fop_type) { + case MEI_FOP_WRITE: + mei_io_cb_free(cb); + cl->writing_state = MEI_WRITE_COMPLETE; + if (waitqueue_active(&cl->tx_wait)) { + wake_up_interruptible(&cl->tx_wait); + } else { + pm_runtime_mark_last_busy(dev->dev); + pm_request_autosuspend(dev->dev); + } + break; + + case MEI_FOP_READ: + list_add_tail(&cb->list, &cl->rd_completed); + if (waitqueue_active(&cl->rx_wait)) + wake_up_interruptible_all(&cl->rx_wait); + else + mei_cl_bus_rx_event(cl); + break; + + case MEI_FOP_CONNECT: + case MEI_FOP_DISCONNECT: + case MEI_FOP_NOTIFY_STOP: + case MEI_FOP_NOTIFY_START: + if (waitqueue_active(&cl->wait)) + wake_up(&cl->wait); + + break; + case MEI_FOP_DISCONNECT_RSP: + mei_io_cb_free(cb); + mei_cl_set_disconnected(cl); + break; + default: + BUG_ON(0); + } +} + /** * mei_cl_all_disconnect - disconnect forcefully all connected clients * - * @dev - mei device + * @dev: mei device */ - void mei_cl_all_disconnect(struct mei_device *dev) { - struct mei_cl *cl, *next; + struct mei_cl *cl; - list_for_each_entry_safe(cl, next, &dev->file_list, link) { - cl->state = MEI_FILE_DISCONNECTED; - cl->mei_flow_ctrl_creds = 0; - cl->timer_count = 0; - } + list_for_each_entry(cl, &dev->file_list, link) + mei_cl_set_disconnected(cl); } /** - * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted + * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted * - * @dev - mei device + * @dev: mei device */ -void mei_cl_all_read_wakeup(struct mei_device *dev) +void mei_cl_all_wakeup(struct mei_device *dev) { - struct mei_cl *cl, *next; - list_for_each_entry_safe(cl, next, &dev->file_list, link) { + struct mei_cl *cl; + + list_for_each_entry(cl, &dev->file_list, link) { if (waitqueue_active(&cl->rx_wait)) { - dev_dbg(&dev->pdev->dev, "Waking up client!\n"); + cl_dbg(dev, cl, "Waking up reading client!\n"); wake_up_interruptible(&cl->rx_wait); } + if 
(waitqueue_active(&cl->tx_wait)) { + cl_dbg(dev, cl, "Waking up writing client!\n"); + wake_up_interruptible(&cl->tx_wait); + } + + /* synchronized under device mutex */ + if (waitqueue_active(&cl->ev_wait)) { + cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); + wake_up_interruptible(&cl->ev_wait); + } } } /** * mei_cl_all_write_clear - clear all pending writes - - * @dev - mei device + * + * @dev: mei device */ void mei_cl_all_write_clear(struct mei_device *dev) { - struct mei_cl_cb *cb, *next; - struct list_head *list; - - list = &dev->write_list.list; - list_for_each_entry_safe(cb, next, list, list) { - list_del(&cb->list); - mei_io_cb_free(cb); - } - - list = &dev->write_waiting_list.list; - list_for_each_entry_safe(cb, next, list, list) { - list_del(&cb->list); - mei_io_cb_free(cb); - } + mei_io_list_free(&dev->write_list, NULL); + mei_io_list_free(&dev->write_waiting_list, NULL); }
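
The bulk of this patch replaces the old index-based ME-client table (mei_me_cl_by_uuid()/mei_me_cl_by_id() returning array indices into dev->me_clients) with a kref-counted list protected by dev->me_clients_rwsem, so that a lookup pins the client object and removal can proceed safely while other paths still hold references. The fragment below is a minimal, self-contained sketch of that lookup-with-refcount pattern under an rwsem; the demo_* names are hypothetical and chosen only for illustration, it is not the in-tree implementation, merely a compile-style outline of the idiom the patch adopts.

/*
 * Sketch only: refcounted object list with rwsem-protected lookup,
 * mirroring the mei_me_cl_get()/put()/del() pattern in this patch.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_me_client {
	struct list_head list;
	struct kref refcnt;
	u8 client_id;
};

static LIST_HEAD(demo_me_clients);
static DECLARE_RWSEM(demo_me_clients_rwsem);

static void demo_me_cl_release(struct kref *ref)
{
	struct demo_me_client *me_cl =
		container_of(ref, struct demo_me_client, refcnt);

	kfree(me_cl);
}

/* Take a reference only if the object is still alive (refcount > 0). */
static struct demo_me_client *demo_me_cl_get(struct demo_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;
	return NULL;
}

static void demo_me_cl_put(struct demo_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, demo_me_cl_release);
}

/* Lookup under the read side; a non-NULL result carries one reference. */
static struct demo_me_client *demo_me_cl_by_id(u8 client_id)
{
	struct demo_me_client *pos, *me_cl = NULL;

	down_read(&demo_me_clients_rwsem);
	list_for_each_entry(pos, &demo_me_clients, list) {
		if (pos->client_id == client_id) {
			me_cl = demo_me_cl_get(pos);
			break;
		}
	}
	up_read(&demo_me_clients_rwsem);

	return me_cl;
}

/* Removal drops the list's reference; the last put frees the object. */
static void demo_me_cl_del(struct demo_me_client *me_cl)
{
	down_write(&demo_me_clients_rwsem);
	list_del_init(&me_cl->list);
	demo_me_cl_put(me_cl);
	up_write(&demo_me_clients_rwsem);
}

A caller that receives a non-NULL pointer from demo_me_cl_by_id() is expected to drop it with demo_me_cl_put() when done, which is the same contract the patched mei_me_cl_by_uuid()/mei_me_cl_by_id() helpers document ("increases ref count ... Return: me client or NULL if not found").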