// SPDX-License-Identifier: GPL-2.0-only
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/types.h>

#include "cpunet.h"

#if defined(CONFIG_SOC_GRX500_BOOTCORE) || defined(CONFIG_SOC_PRX300_BOOTCORE)
#define CPUNET_ON_BOOTCORE
#endif

#ifdef CPUNET_ON_BOOTCORE
/* provides grx500_bootcore_register_irq() and the other bootcore IRQ helpers */
#include
#endif

/* This defines how many messages can be pending in each direction */
#define CPUNET_SLOTS 16u
#define CPUNET_MTU (1 << 13)
#define CPUNET_MAILBOX_SIZE \
	(sizeof(struct queue_shared) + \
	 CPUNET_SLOTS * sizeof(struct mps_message))
#define CPUNET_MEMPOOL_SIZE (CPUNET_MTU * CPUNET_SLOTS)
#define CPUNET_MAGIC_INIT 0xc68c3d57
#define CPUNET_MAGIC_ACK 0x13913353

/* lantiq mps subsystem */
#define MPS_SAD0SR(x) ((u32 *)(x->reg_mapping + 0x0048))
#define MPS_SAD1SR(x) ((u32 *)(x->reg_mapping + 0x0070))
#define MPS_CAD0SR(x) ((u32 *)(x->reg_mapping + 0x0050))
#define MPS_CAD1SR(x) ((u32 *)(x->reg_mapping + 0x0080))
#define MPS_AD0ENR(x) ((u32 *)(x->reg_mapping + 0x0058))
#define MPS_AD1ENR(x) ((u32 *)(x->reg_mapping + 0x0074))
#define MPS_MIN_REG_MAPPING 0x80
#define MPS_IRQ_MASK 0x1

#ifdef CPUNET_ON_BOOTCORE
#define MPS_IR4 22u /* hardcoded irq number only used for bootcore */
#endif

struct mps_message {
	u32 msg;
};

struct queue_shared {
	u32 magic;
	u32 read;
	u32 write;
	struct mps_message buffer[];
};

/* queue_shared and mps_message are the only on-the-wire structs and therefore
 * need to have consistent padding (i.e. zero padding) on both CPUs. We also
 * implicitly assume that both CPUs use the same endianness. Because we cannot
 * easily check that this is the case, we just check that they are both big
 * endian. That way we at least get an error when this changes.
 */
static void __maybe_unused check_zero_padding_and_endianess(void)
{
	u8 swaptest[] = { 1, 0, 0, 0 };

	BUILD_BUG_ON_MSG(sizeof(struct queue_shared) != 3 * sizeof(u32),
			 "struct queue_shared must be zero padded");
	BUILD_BUG_ON_MSG(sizeof(struct mps_message) != sizeof(u32),
			 "struct mps_message must be zero padded");
	BUILD_BUG_ON_MSG(
		*(u32 *)swaptest == 1,
		"Little endian detected. Make sure that both CPUs use the same endianness.");
	BUILD_BUG_ON(!is_power_of_2(CPUNET_SLOTS));
}

/* One queue defines one distinct data area to pass messages between CPUs.
 * The shared part lies inside SRAM and is read/write by both CPUs.
 * The capacity is kept in private memory so that the other CPU cannot
 * tamper with the bounds checks.
 */
struct queue {
	unsigned int capacity;
	struct queue_shared __iomem *shared;
};

struct mempool_buffer {
	u8 *cpu;
	dma_addr_t dma;
};

struct cpunet_hw {
	struct device *dev;
	void __iomem *reg_mapping;
	struct queue mailbox_rx;
	struct queue mailbox_tx;
	struct mempool_buffer mempool_rx[CPUNET_SLOTS];
	struct mempool_buffer mempool_tx[CPUNET_SLOTS];
	struct resource irqres[1];
	void *private_data;
	bool shook_hands;
};
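/* Illustration of the shared-memory layout the definitions above produce (a
 * sketch derived from struct queue_shared and CPUNET_SLOTS; the offsets rely
 * on the zero padding checked in check_zero_padding_and_endianess()):
 *
 *   0x00  u32 magic                handshake word (CPUNET_MAGIC_INIT/ACK)
 *   0x04  u32 read                 consumer index, advanced by the reader
 *   0x08  u32 write                producer index, advanced by the writer
 *   0x0c  struct mps_message buffer[CPUNET_SLOTS]
 *
 * Indices are masked with capacity = CPUNET_SLOTS - 1, so each ring holds at
 * most CPUNET_SLOTS - 1 outstanding messages; one slot always stays free to
 * tell a full ring apart from an empty one.
 */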
/******************************************************************************
 *
 * Static functions
 *
 ******************************************************************************/

static int queue_is_full(struct queue const *Q)
{
	unsigned int read = Q->shared->read;
	unsigned int write = Q->shared->write;

	if (((write + 1) & Q->capacity) == read)
		return -EAGAIN;

	/* This either is a bug or an attack. */
	BUG_ON(write > Q->capacity);

	return write;
}

static int queue_is_empty(struct queue const *Q)
{
	unsigned int read = Q->shared->read;
	unsigned int write = Q->shared->write;

	if (read == write)
		return -EAGAIN;

	/* This either is a bug or an attack. */
	BUG_ON(read > Q->capacity);

	return read;
}

static int queue_peek(struct queue const *Q, struct mps_message *E)
{
	int read = queue_is_empty(Q);

	if (read < 0)
		return read;

	/* prevent Q->shared->buffer[] being read before the queue empty check */
	rmb();

	*E = Q->shared->buffer[read];

	return read;
}

static void queue_drop(struct queue *Q)
{
	/* there might be reads from the queue that must finish first */
	rmb();

	Q->shared->read = (Q->shared->read + 1) & Q->capacity;
}

static int queue_in(struct queue *Q, struct mps_message const *E)
{
	int write = queue_is_full(Q);

	if (unlikely(write < 0))
		return write;

	/* prevent Q->shared->buffer[] being written before the pointers are
	 * read by the queue_is_full check
	 */
	rmb();

	Q->shared->buffer[write] = *E;

	/* data must be written before pointers */
	wmb();

	Q->shared->write = (write + 1) & Q->capacity;

	return 0;
}

static void trigger_int(struct cpunet_hw *hw)
{
#ifdef CPUNET_ON_BOOTCORE
	writel(MPS_IRQ_MASK, MPS_SAD1SR(hw));
#else
	writel(MPS_IRQ_MASK, MPS_SAD0SR(hw));
#endif
}

static bool shook_hands(struct cpunet_hw *hw)
{
	const u32 magic = hw->mailbox_rx.shared->magic;

	if (magic == CPUNET_MAGIC_INIT || magic == CPUNET_MAGIC_ACK) {
		hw->shook_hands = true;
		hw->mailbox_rx.shared->magic = 0;

		if (magic == CPUNET_MAGIC_INIT) {
			hw->mailbox_tx.shared->magic = CPUNET_MAGIC_ACK;
			trigger_int(hw);
		}
	}

	return hw->shook_hands;
}
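/* Handshake implemented by shook_hands() above, as a sketch of the message
 * flow (the remote CPU is assumed to run the same code):
 *
 *   local CPU                               remote CPU
 *   init_mailbox(): tx->magic = INIT
 *                                           cpunet_hw_rx() -> shook_hands():
 *                                             sees INIT in its rx mailbox,
 *                                             clears it, writes ACK into its
 *                                             tx mailbox, trigger_int()
 *   cpunet_hw_rx() -> shook_hands():
 *     sees ACK, clears it, handshake done
 *
 * Until INIT or ACK has been observed in the rx mailbox, cpunet_hw_rx()
 * returns -EBUSY and no payload is consumed.
 */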
/******************************************************************************
 *
 * Non-Static
 *
 ******************************************************************************/

unsigned int cpunet_hw_buffer_size(void)
{
	return CPUNET_MTU;
}

unsigned int cpunet_hw_max_num(void)
{
	return CPUNET_SLOTS - 1;
}

void *cpunet_hw_tx_alloc(struct cpunet_hw *hw)
{
	int write = queue_is_full(&hw->mailbox_tx);

	if (unlikely(write < 0))
		return NULL;

	return hw->mempool_tx[write].cpu;
}

void cpunet_hw_tx(struct cpunet_hw *hw, unsigned int length)
{
	int write = queue_is_full(&hw->mailbox_tx);
	struct mps_message msg;
	int err;

	BUG_ON(write < 0 || length == 0 || length > CPUNET_MTU);

	/* make sure payload hits sdram before signalling */
	dma_sync_single_for_device(hw->dev, hw->mempool_tx[write].dma, length,
				   DMA_TO_DEVICE);

	msg.msg = (u32)length;
	err = queue_in(&hw->mailbox_tx, &msg);
	BUG_ON(err);

	trigger_int(hw);
}

bool cpunet_hw_tx_full(struct cpunet_hw *hw)
{
	return queue_is_full(&hw->mailbox_tx) < 0;
}

int cpunet_hw_rx(struct cpunet_hw *hw, const void **data, unsigned int *len)
{
	struct mempool_buffer *buffer;
	struct mps_message msg;
	int read;

	if (unlikely(!shook_hands(hw)))
		return -EBUSY;

	read = queue_peek(&hw->mailbox_rx, &msg);
	if (read < 0)
		return read;

	/* This might be an attack. */
	BUG_ON(msg.msg > CPUNET_MTU);

	buffer = &hw->mempool_rx[read];
	*data = buffer->cpu;
	*len = msg.msg;

	/* hand the buffer back to the CPU before the payload is read */
	dma_sync_single_for_cpu(hw->dev, buffer->dma, *len, DMA_FROM_DEVICE);

	return 0;
}

void cpunet_hw_rx_done(struct cpunet_hw *hw)
{
	queue_drop(&hw->mailbox_rx);
	trigger_int(hw);
}

void cpunet_hw_ack_int(struct cpunet_hw *hw)
{
#ifdef CPUNET_ON_BOOTCORE
	writel(MPS_IRQ_MASK, MPS_CAD0SR(hw));
#else
	writel(MPS_IRQ_MASK, MPS_CAD1SR(hw));
#endif
}

void cpunet_hw_enable_int(struct cpunet_hw *hw)
{
#ifdef CPUNET_ON_BOOTCORE
	writel(MPS_IRQ_MASK, MPS_AD0ENR(hw));
#else
	writel(MPS_IRQ_MASK, MPS_AD1ENR(hw));
#endif
}

void cpunet_hw_disable_int(struct cpunet_hw *hw)
{
#ifdef CPUNET_ON_BOOTCORE
	writel(0x0, MPS_AD0ENR(hw));
#else
	writel(0x0, MPS_AD1ENR(hw));
#endif
}
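/* Sketch of how the upper layer of the driver is expected to use the exported
 * helpers above; the names priv, skb and handle_rx are illustrative only and
 * not defined in this file:
 *
 *	// transmit path
 *	void *buf = cpunet_hw_tx_alloc(priv->hw);
 *	if (!buf)
 *		return NETDEV_TX_BUSY;		// ring full, retry later
 *	memcpy(buf, skb->data, skb->len);	// at most cpunet_hw_buffer_size()
 *	cpunet_hw_tx(priv->hw, skb->len);	// publish slot, raise peer irq
 *
 *	// receive path, typically driven by the interrupt handler or a poll
 *	const void *data;
 *	unsigned int len;
 *	while (!cpunet_hw_rx(priv->hw, &data, &len)) {
 *		handle_rx(priv, data, len);	// consume payload
 *		cpunet_hw_rx_done(priv->hw);	// release slot, raise peer irq
 *	}
 */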
/******************************************************************************
 *
 * Init
 *
 ******************************************************************************/

static int get_memory(struct device_node *node, size_t min_size,
		      struct resource *out)
{
	int ret = 0;

	ret = of_address_to_resource(node, 0, out);
	if (ret)
		return ret;

	if (resource_size(out) < min_size) {
		memset(out, 0, sizeof(*out));
		return -EINVAL;
	}

	return ret;
}

static int get_phandle(const struct device *dev, const char *name,
		       size_t min_size, struct resource *out)
{
	struct of_phandle_args args;
	u32 offset, len;
	int ret = 0;

	ret = of_parse_phandle_with_fixed_args(dev->of_node, name, 2, 0,
					       &args);
	if (ret) {
		dev_err(dev, "Failed to parse phandle %s\n", name);
		return ret;
	}

	offset = args.args[0];
	len = args.args[1];

	if (len < min_size) {
		dev_err(dev,
			"Length argument to phandle too small: 0x%x. Required 0x%zx\n",
			len, min_size);
		of_node_put(args.np);
		return -EINVAL;
	}

	ret = get_memory(args.np, len + offset, out);
	of_node_put(args.np);
	if (ret) {
		dev_err(dev, "Failed to get reg of node %s. Required size: 0x%x\n",
			name, len + offset);
		return ret;
	}

	out->start += offset;
	out->end = out->start + len - 1;

	return ret;
}
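/* get_phandle() expects each property to reference a node with a "reg" entry,
 * followed by two cells giving an offset into that region and a length. An
 * illustrative device tree fragment (node names and addresses are made up and
 * not taken from a real board file):
 *
 *	sram: sram@1f200000 {
 *		reg = <0x1f200000 0x10000>;
 *	};
 *
 *	cpunet {
 *		mailbox-rx = <&sram 0x0000 0x100>;
 *		mailbox-tx = <&sram 0x0100 0x100>;
 *		...
 *	};
 *
 * The offset is added to the start of the referenced region and the length is
 * checked against the minimum size required by the caller.
 */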
static void remove_dma(struct device *dev, struct mempool_buffer *mappings,
		       enum dma_data_direction dir)
{
	u32 i;

	for (i = 0; i < CPUNET_SLOTS; ++i) {
		struct mempool_buffer *mapping = &mappings[i];

		if (!mapping->cpu)
			continue;

		dma_unmap_single(dev, mapping->dma, CPUNET_MTU, dir);
	}
}

static int create_dma(struct device *dev, struct mempool_buffer *mappings,
		      u8 *start, enum dma_data_direction dir)
{
	u32 i;

	for (i = 0; i < CPUNET_SLOTS; ++i) {
		struct mempool_buffer *mapping = &mappings[i];

		mapping->cpu = start + i * CPUNET_MTU;
		mapping->dma = dma_map_single(dev, mapping->cpu, CPUNET_MTU,
					      dir);
		if (dma_mapping_error(dev, mapping->dma)) {
			memset(mapping, 0, sizeof(*mapping));
			goto err;
		}
	}

	return 0;

err:
	remove_dma(dev, mappings, dir);
	return -EFAULT;
}

static void exit_mailbox(struct cpunet_hw *hw)
{
	iounmap(hw->mailbox_tx.shared);
	iounmap(hw->mailbox_rx.shared);
}

static int init_mailbox(struct cpunet_hw *hw)
{
	struct resource r;
	int ret = 0;

	ret = get_phandle(hw->dev, "mailbox-rx", CPUNET_MAILBOX_SIZE, &r);
	if (ret)
		goto out;

	hw->mailbox_rx.shared = ioremap(r.start, resource_size(&r));
	if (!hw->mailbox_rx.shared) {
		ret = -EFAULT;
		goto out;
	}

	ret = get_phandle(hw->dev, "mailbox-tx", CPUNET_MAILBOX_SIZE, &r);
	if (ret)
		goto unmap;

	hw->mailbox_tx.shared = ioremap(r.start, resource_size(&r));
	if (!hw->mailbox_tx.shared) {
		ret = -EFAULT;
		goto unmap;
	}

	/* each side only initializes its tx side */
	memset(hw->mailbox_tx.shared, 0, CPUNET_MAILBOX_SIZE);

	/* magic can only be written after the mailbox is initialized */
	wmb();
	hw->mailbox_tx.shared->magic = CPUNET_MAGIC_INIT;

	dev_info(hw->dev, "Initialized mailbox: rx=0x%p tx=0x%p\n",
		 hw->mailbox_rx.shared, hw->mailbox_tx.shared);

	return ret;

unmap:
	iounmap(hw->mailbox_rx.shared);
out:
	return ret;
}

static void exit_mempool(struct cpunet_hw *hw)
{
	remove_dma(hw->dev, hw->mempool_rx, DMA_FROM_DEVICE);
	remove_dma(hw->dev, hw->mempool_tx, DMA_TO_DEVICE);
}

static int init_mempool(struct cpunet_hw *hw)
{
	const int min_align = dma_get_cache_alignment();
	struct resource r;
	u8 *rx, *tx;
	int ret = 0;

	ret = get_phandle(hw->dev, "mempool-rx", CPUNET_MEMPOOL_SIZE, &r);
	if (ret)
		goto out;

	rx = phys_to_virt(r.start);
	if (!rx) {
		ret = -EFAULT;
		goto out;
	}

	ret = get_phandle(hw->dev, "mempool-tx", CPUNET_MEMPOOL_SIZE, &r);
	if (ret)
		goto out;

	tx = phys_to_virt(r.start);
	if (!tx) {
		ret = -EFAULT;
		goto out;
	}

	if ((uintptr_t)rx % min_align || (uintptr_t)tx % min_align ||
	    CPUNET_MTU % min_align) {
		dev_err(hw->dev,
			"Mempool and mtu must be cache aligned. Minimum alignment: %d\n",
			min_align);
		ret = -EINVAL;
		goto out;
	}

	ret = create_dma(hw->dev, hw->mempool_rx, rx, DMA_FROM_DEVICE);
	if (ret)
		goto out;

	ret = create_dma(hw->dev, hw->mempool_tx, tx, DMA_TO_DEVICE);
	if (ret)
		goto mempool;

	dev_info(hw->dev, "Initialized mempool: rx=0x%p tx=0x%p min_align=%d\n",
		 rx, tx, min_align);

	return 0;

mempool:
	exit_mempool(hw);
out:
	return ret;
}

void cpunet_hw_exit(struct cpunet_hw *hw)
{
	cpunet_hw_disable_int(hw);

#ifdef CPUNET_ON_BOOTCORE
	grx500_bootcore_unregister_irq(MPS_IR4, GRX500_BOOTCORE_MPS2_OUT_INDEX,
				       hw->private_data);
#else
	devm_free_irq(hw->dev, hw->irqres[0].start, hw->private_data);
#endif

	exit_mempool(hw);
	exit_mailbox(hw);
	iounmap(hw->reg_mapping);
}

struct cpunet_hw *cpunet_hw_init(struct device *dev, irq_handler_t clb,
				 void *data)
{
	struct resource r;
	struct cpunet_hw *hw;
	int err;

	if (!dev || !data)
		return NULL;

	hw = devm_kzalloc(dev, sizeof(struct cpunet_hw), GFP_KERNEL);
	if (!hw)
		return NULL;

	hw->dev = dev;
	hw->private_data = data;
	hw->mailbox_rx.capacity = CPUNET_SLOTS - 1;
	hw->mailbox_tx.capacity = CPUNET_SLOTS - 1;

	dev_info(dev, "slots=%u mtu=%u mailbox_size=0x%zx mempool_size=0x%x\n",
		 CPUNET_SLOTS, CPUNET_MTU, CPUNET_MAILBOX_SIZE,
		 CPUNET_MEMPOOL_SIZE);

	err = get_memory(dev->of_node, MPS_MIN_REG_MAPPING, &r);
	if (err)
		goto err;

	hw->reg_mapping = ioremap(r.start, resource_size(&r));
	if (!hw->reg_mapping) {
		err = -EFAULT;
		goto err;
	}

	err = init_mailbox(hw);
	if (err)
		goto reg;

	err = init_mempool(hw);
	if (err)
		goto mailbox;

#ifdef CPUNET_ON_BOOTCORE
	err = grx500_bootcore_register_irq(MPS_IR4,
					   GRX500_BOOTCORE_MPS2_OUT_INDEX, clb,
					   0, dev_name(dev), data,
					   grx500_bootcore_mps_irq);
#else
	if (of_irq_to_resource_table(dev->of_node, hw->irqres,
				     ARRAY_SIZE(hw->irqres)) !=
	    ARRAY_SIZE(hw->irqres)) {
		err = -ENOTCONN;
		goto mempool;
	}

	err = devm_request_irq(dev, hw->irqres[0].start, clb, 0, dev_name(dev),
			       data);
#endif
	if (err)
		goto mempool;

	dev_info(dev, "hw init successful\n");

	return hw;

mempool:
	exit_mempool(hw);
mailbox:
	exit_mailbox(hw);
reg:
	iounmap(hw->reg_mapping);
err:
	devm_kfree(dev, hw);
	dev_err(dev, "hw init failed: %d\n", err);
	return NULL;
}
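/* Sketch of the expected probe-time usage of cpunet_hw_init()/cpunet_hw_exit()
 * by the platform driver that owns this block; cpunet_irq, cpunet_priv, priv
 * and pdev are illustrative names, not definitions from this file:
 *
 *	static irqreturn_t cpunet_irq(int irq, void *data)
 *	{
 *		struct cpunet_priv *priv = data;
 *
 *		cpunet_hw_ack_int(priv->hw);
 *		// schedule rx processing, e.g. napi_schedule()
 *		return IRQ_HANDLED;
 *	}
 *
 *	priv->hw = cpunet_hw_init(&pdev->dev, cpunet_irq, priv);
 *	if (!priv->hw)
 *		return -ENODEV;
 *	cpunet_hw_enable_int(priv->hw);
 *	...
 *	cpunet_hw_exit(priv->hw);	// on remove
 */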