diff --git a/lib/include/openamp/rpmsg.h b/lib/include/openamp/rpmsg.h
index 8435c769d..8b8ec2ed1 100644
--- a/lib/include/openamp/rpmsg.h
+++ b/lib/include/openamp/rpmsg.h
@@ -83,11 +83,22 @@ struct rpmsg_endpoint {
 /**
  * struct rpmsg_device_ops - RPMsg device operations
  * @send_offchannel_raw: send RPMsg data
+ * @hold_rx_buffer: hold RPMsg RX buffer
+ * @release_rx_buffer: release RPMsg RX buffer
+ * @get_tx_payload_buffer: get RPMsg TX buffer
+ * @send_offchannel_nocopy: send RPMsg data without copy
  */
 struct rpmsg_device_ops {
 	int (*send_offchannel_raw)(struct rpmsg_device *rdev,
 				   uint32_t src, uint32_t dst,
 				   const void *data, int len, int wait);
+	void (*hold_rx_buffer)(struct rpmsg_device *rdev, void *rxbuf);
+	void (*release_rx_buffer)(struct rpmsg_device *rdev, void *rxbuf);
+	void *(*get_tx_payload_buffer)(struct rpmsg_device *rdev,
+				       uint32_t *len, int wait);
+	int (*send_offchannel_nocopy)(struct rpmsg_device *rdev,
+				      uint32_t src, uint32_t dst,
+				      const void *data, int len);
 };
 
 /**
@@ -265,6 +276,166 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept,
 	return rpmsg_send_offchannel_raw(ept, src, dst, data, len, false);
 }
 
+/**
+ * @brief Holds the RX buffer for use outside the receive callback.
+ *
+ * Calling this function prevents the RPMsg receive buffer from being released
+ * back to the pool of shmem buffers. This API can only be called from the RX
+ * callback context (rpmsg_ept_cb). With it, the application does not need to
+ * copy the message in the RX callback. Instead, the RX buffer base address is
+ * saved in the application context and processed later from the application's
+ * process context. Once the message has been processed, the application
+ * releases the RX buffer for reuse in the vring by calling
+ * rpmsg_release_rx_buffer().
+ *
+ * @param ept The rpmsg endpoint
+ * @param rxbuf RX buffer with message payload
+ *
+ * @see rpmsg_release_rx_buffer
+ */
+void rpmsg_hold_rx_buffer(struct rpmsg_endpoint *ept, void *rxbuf);
+
+/**
+ * @brief Releases the RX buffer for future reuse in the vring.
+ *
+ * This API can be called from the process context once the message held in the
+ * RX buffer has been processed.
+ *
+ * @param ept The rpmsg endpoint
+ * @param rxbuf RX buffer with message payload
+ *
+ * @see rpmsg_hold_rx_buffer
+ */
+void rpmsg_release_rx_buffer(struct rpmsg_endpoint *ept, void *rxbuf);
+
+/**
+ * @brief Gets the TX buffer for the message payload.
+ *
+ * This API can only be called from the process context to get a TX buffer from
+ * the vring. This way, the application can write its message directly into the
+ * vring TX buffer without copying it from an application buffer.
+ * It is the application's responsibility to correctly fill the allocated TX
+ * buffer with data and to pass correct parameters to rpmsg_send_nocopy() or
+ * rpmsg_sendto_nocopy() to perform the no-copy send.
+ *
+ * @param ept Pointer to the rpmsg endpoint
+ * @param len Pointer to store the TX buffer size
+ * @param wait Boolean, whether to wait for a buffer to become available
+ *
+ * @return The TX buffer address on success, NULL on failure
+ *
+ * @see rpmsg_send_offchannel_nocopy
+ * @see rpmsg_sendto_nocopy
+ * @see rpmsg_send_nocopy
+ */
+void *rpmsg_get_tx_payload_buffer(struct rpmsg_endpoint *ept,
+				  uint32_t *len, int wait);
+
+/**
+ * @brief Sends a message in a TX buffer reserved by
+ * rpmsg_get_tx_payload_buffer() across to the remote processor.
+ *
+ * This function sends data of length len to the remote dst address,
+ * and uses src as the source address.
+ * The message will be sent to the remote processor to which the ept
+ * endpoint belongs.
+ * The application must take responsibility for:
+ * 1. reserving the TX buffer (rpmsg_get_tx_payload_buffer())
+ * 2. filling the data to be sent into the pre-allocated TX buffer
+ * 3. not exceeding the buffer size when filling the data
+ * 4. data cache coherency
+ *
+ * After rpmsg_send_offchannel_nocopy() is issued, the TX buffer is no longer
+ * owned by the sending task and must not be touched, unless the
+ * rpmsg_send_offchannel_nocopy() call fails and returns an error. In that
+ * case the application should re-issue the rpmsg_send_offchannel_nocopy()
+ * call.
+ *
+ * @param ept The rpmsg endpoint
+ * @param src The rpmsg endpoint local address
+ * @param dst The rpmsg endpoint remote address
+ * @param data TX buffer with message filled
+ * @param len Length of payload
+ *
+ * @return Number of bytes sent, or a negative error value on failure.
+ *
+ * @see rpmsg_get_tx_payload_buffer
+ * @see rpmsg_sendto_nocopy
+ * @see rpmsg_send_nocopy
+ */
+int rpmsg_send_offchannel_nocopy(struct rpmsg_endpoint *ept, uint32_t src,
+				 uint32_t dst, const void *data, int len);
+
+/**
+ * @brief Sends a message in a TX buffer allocated by
+ * rpmsg_get_tx_payload_buffer() across to the remote processor, specifying the
+ * destination address.
+ *
+ * This function sends data of length len to the remote dst address.
+ * The message will be sent to the remote processor to which the ept
+ * endpoint belongs, using ept's source address.
+ * The application must take responsibility for:
+ * 1. reserving the TX buffer (rpmsg_get_tx_payload_buffer())
+ * 2. filling the data to be sent into the pre-allocated TX buffer
+ * 3. not exceeding the buffer size when filling the data
+ * 4. data cache coherency
+ *
+ * After rpmsg_sendto_nocopy() is issued, the TX buffer is no longer owned by
+ * the sending task and must not be touched, unless the rpmsg_sendto_nocopy()
+ * call fails and returns an error. In that case the application should
+ * re-issue the rpmsg_sendto_nocopy() call.
+ *
+ * @param ept The rpmsg endpoint
+ * @param data TX buffer with message filled
+ * @param len Length of payload
+ * @param dst Destination address
+ *
+ * @return Number of bytes sent, or a negative error value on failure.
+ *
+ * @see rpmsg_get_tx_payload_buffer
+ * @see rpmsg_send_offchannel_nocopy
+ * @see rpmsg_send_nocopy
+ */
+static inline int rpmsg_sendto_nocopy(struct rpmsg_endpoint *ept,
+				      const void *data, int len, uint32_t dst)
+{
+	return rpmsg_send_offchannel_nocopy(ept, ept->addr, dst, data, len);
+}
+
+/**
+ * @brief Sends a message in a TX buffer reserved by
+ * rpmsg_get_tx_payload_buffer() across to the remote processor.
+ *
+ * This function sends data of length len on the ept endpoint.
+ * The message will be sent to the remote processor to which the ept
+ * endpoint belongs, using ept's source and destination addresses.
+ * The application must take responsibility for:
+ * 1. reserving the TX buffer (rpmsg_get_tx_payload_buffer())
+ * 2. filling the data to be sent into the pre-allocated TX buffer
+ * 3. not exceeding the buffer size when filling the data
+ * 4. data cache coherency
+ *
+ * After rpmsg_send_nocopy() is issued, the TX buffer is no longer owned by
+ * the sending task and must not be touched, unless the rpmsg_send_nocopy()
+ * call fails and returns an error. In that case the application should
+ * re-issue the rpmsg_send_nocopy() call.
+ *
+ * @param ept The rpmsg endpoint
+ * @param data TX buffer with message filled
+ * @param len Length of payload
+ *
+ * @return Number of bytes sent, or a negative error value on failure.
+ *
+ * @see rpmsg_get_tx_payload_buffer
+ * @see rpmsg_send_offchannel_nocopy
+ * @see rpmsg_sendto_nocopy
+ */
+static inline int rpmsg_send_nocopy(struct rpmsg_endpoint *ept,
+				    const void *data, int len)
+{
+	return rpmsg_send_offchannel_nocopy(ept, ept->addr,
+					    ept->dest_addr, data, len);
+}
+
 /**
  * rpmsg_init_ept - initialize rpmsg endpoint
  *
diff --git a/lib/rpmsg/rpmsg.c b/lib/rpmsg/rpmsg.c
index 82b5a45e8..46c3691e9 100644
--- a/lib/rpmsg/rpmsg.c
+++ b/lib/rpmsg/rpmsg.c
@@ -144,6 +144,65 @@ int rpmsg_send_ns_message(struct rpmsg_endpoint *ept, unsigned long flags)
 	return RPMSG_SUCCESS;
 }
 
+void rpmsg_hold_rx_buffer(struct rpmsg_endpoint *ept, void *rxbuf)
+{
+	struct rpmsg_device *rdev;
+
+	if (!ept || !ept->rdev || !rxbuf)
+		return;
+
+	rdev = ept->rdev;
+
+	if (rdev->ops.hold_rx_buffer)
+		rdev->ops.hold_rx_buffer(rdev, rxbuf);
+}
+
+void rpmsg_release_rx_buffer(struct rpmsg_endpoint *ept, void *rxbuf)
+{
+	struct rpmsg_device *rdev;
+
+	if (!ept || !ept->rdev || !rxbuf)
+		return;
+
+	rdev = ept->rdev;
+
+	if (rdev->ops.release_rx_buffer)
+		rdev->ops.release_rx_buffer(rdev, rxbuf);
+}
+
+void *rpmsg_get_tx_payload_buffer(struct rpmsg_endpoint *ept,
+				  uint32_t *len, int wait)
+{
+	struct rpmsg_device *rdev;
+
+	if (!ept || !ept->rdev || !len)
+		return NULL;
+
+	rdev = ept->rdev;
+
+	if (rdev->ops.get_tx_payload_buffer)
+		return rdev->ops.get_tx_payload_buffer(rdev, len, wait);
+
+	return NULL;
+}
+
+int rpmsg_send_offchannel_nocopy(struct rpmsg_endpoint *ept, uint32_t src,
+				 uint32_t dst, const void *data, int len)
+{
+	struct rpmsg_device *rdev;
+
+	if (!ept || !ept->rdev || !data || dst == RPMSG_ADDR_ANY)
+		return RPMSG_ERR_PARAM;
+
+	rdev = ept->rdev;
+
+	if (rdev->ops.send_offchannel_nocopy)
+		return rdev->ops.send_offchannel_nocopy(rdev, src, dst,
+							data, len);
+
+	return RPMSG_ERR_PARAM;
+}
+
 struct rpmsg_endpoint *rpmsg_get_endpoint(struct rpmsg_device *rdev,
 					  const char *name, uint32_t addr,
 					  uint32_t dest_addr)
diff --git a/lib/rpmsg/rpmsg_internal.h b/lib/rpmsg/rpmsg_internal.h
index b381cb70e..a451af68f 100644
--- a/lib/rpmsg/rpmsg_internal.h
+++ b/lib/rpmsg/rpmsg_internal.h
@@ -35,7 +35,12 @@ extern "C" {
 	} while (0)
 #endif
 
+#define RPMSG_BUF_HELD (1U << 31) /* Flag to indicate the buffer is held */
+
+#define RPMSG_LOCATE_HDR(p) \
+	((struct rpmsg_hdr *)((unsigned char *)(p) - sizeof(struct rpmsg_hdr)))
 #define RPMSG_LOCATE_DATA(p) ((unsigned char *)(p) + sizeof(struct rpmsg_hdr))
+
 /**
  * enum rpmsg_ns_flags - dynamic name service announcement flags
  *
diff --git a/lib/rpmsg/rpmsg_virtio.c b/lib/rpmsg/rpmsg_virtio.c
index 4e97d339d..711af6ba3 100644
--- a/lib/rpmsg/rpmsg_virtio.c
+++ b/lib/rpmsg/rpmsg_virtio.c
@@ -147,6 +147,7 @@ static void *rpmsg_virtio_get_tx_buffer(struct rpmsg_virtio_device *rvdev,
 		data = rpmsg_virtio_shm_pool_get_buffer(rvdev->shpool,
 							RPMSG_BUFFER_SIZE);
 		*len = RPMSG_BUFFER_SIZE;
+		*idx = 0;
 	}
 }
 #endif /*!VIRTIO_SLAVE_ONLY*/
@@ -260,42 +261,54 @@ static int _rpmsg_virtio_get_buffer_size(struct rpmsg_virtio_device *rvdev)
 	return length;
 }
 
-/**
- * This function sends rpmsg "message" to remote device.
- *
- * @param rdev - pointer to rpmsg device
- * @param src - source address of channel
- * @param dst - destination address of channel
- * @param data - data to transmit
- * @param len - size of data
- * @param wait - boolean, wait or not for buffer to become
- *               available
- *
- * @return - size of data sent or negative value for failure.
- *
- */
-static int rpmsg_virtio_send_offchannel_raw(struct rpmsg_device *rdev,
-					    uint32_t src, uint32_t dst,
-					    const void *data,
-					    int len, int wait)
+static void rpmsg_virtio_hold_rx_buffer(struct rpmsg_device *rdev, void *rxbuf)
+{
+	struct rpmsg_hdr *rp_hdr;
+
+	(void)rdev;
+
+	rp_hdr = RPMSG_LOCATE_HDR(rxbuf);
+
+	/* Set held status to keep buffer */
+	rp_hdr->reserved |= RPMSG_BUF_HELD;
+}
+
+static void rpmsg_virtio_release_rx_buffer(struct rpmsg_device *rdev,
+					   void *rxbuf)
 {
 	struct rpmsg_virtio_device *rvdev;
-	struct rpmsg_hdr rp_hdr;
-	void *buffer = NULL;
+	struct rpmsg_hdr *rp_hdr;
+	uint16_t idx;
+	uint32_t len;
+
+	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);
+	rp_hdr = RPMSG_LOCATE_HDR(rxbuf);
+	/* The reserved field contains buffer index */
+	idx = rp_hdr->reserved & ~RPMSG_BUF_HELD;
+
+	metal_mutex_acquire(&rdev->lock);
+	/* Return buffer on virtqueue. */
+	len = virtqueue_get_buffer_length(rvdev->rvq, idx);
+	rpmsg_virtio_return_buffer(rvdev, rp_hdr, len, idx);
+	metal_mutex_release(&rdev->lock);
+}
+
+static void *rpmsg_virtio_get_tx_payload_buffer(struct rpmsg_device *rdev,
+						uint32_t *len, int wait)
+{
+	struct rpmsg_virtio_device *rvdev;
+	struct rpmsg_hdr *rp_hdr;
 	uint16_t idx;
 	int tick_count;
-	uint32_t buff_len;
 	int status;
-	struct metal_io_region *io;
 
 	/* Get the associated remote device for channel. */
 	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);
 
-	status = rpmsg_virtio_get_status(rvdev);
 	/* Validate device state */
-	if (!(status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
-		return RPMSG_ERR_DEV_STATE;
-	}
+	status = rpmsg_virtio_get_status(rvdev);
+	if (!(status & VIRTIO_CONFIG_STATUS_DRIVER_OK))
+		return NULL;
 
 	if (wait)
 		tick_count = RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL;
@@ -303,49 +316,70 @@ static int rpmsg_virtio_send_offchannel_raw(struct rpmsg_device *rdev,
 	else
 		tick_count = 0;
 
 	while (1) {
-		int avail_size;
-
 		/* Lock the device to enable exclusive access to virtqueues */
 		metal_mutex_acquire(&rdev->lock);
-		avail_size = _rpmsg_virtio_get_buffer_size(rvdev);
-		if (avail_size && len > avail_size) {
-			metal_mutex_release(&rdev->lock);
-			return RPMSG_ERR_BUFF_SIZE;
-		}
-
-		buffer = rpmsg_virtio_get_tx_buffer(rvdev, &buff_len, &idx);
+		rp_hdr = rpmsg_virtio_get_tx_buffer(rvdev, len, &idx);
 		metal_mutex_release(&rdev->lock);
-		if (buffer || !tick_count)
+		if (rp_hdr || !tick_count)
 			break;
-		if (avail_size != 0)
-			return RPMSG_ERR_BUFF_SIZE;
 		metal_sleep_usec(RPMSG_TICKS_PER_INTERVAL);
 		tick_count--;
 	}
-	if (!buffer)
-		return RPMSG_ERR_NO_BUFF;
+
+	if (!rp_hdr)
+		return NULL;
+
+	/* Store the index into the reserved field to be used when sending */
+	rp_hdr->reserved = idx;
+
+	/* Actual data buffer size is vring buffer size minus header length */
+	*len -= sizeof(struct rpmsg_hdr);
+	return RPMSG_LOCATE_DATA(rp_hdr);
+}
+
+static int rpmsg_virtio_send_offchannel_nocopy(struct rpmsg_device *rdev,
+					       uint32_t src, uint32_t dst,
+					       const void *data, int len)
+{
+	struct rpmsg_virtio_device *rvdev;
+	struct metal_io_region *io;
+	struct rpmsg_hdr rp_hdr;
+	struct rpmsg_hdr *hdr;
+	uint32_t buff_len;
+	uint16_t idx;
+	int status;
+
+	/* Get the associated remote device for channel. */
+	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);
+
+	hdr = RPMSG_LOCATE_HDR(data);
+	/* The reserved field contains buffer index */
+	idx = hdr->reserved;
 
 	/* Initialize RPMSG header. */
 	rp_hdr.dst = dst;
 	rp_hdr.src = src;
 	rp_hdr.len = len;
 	rp_hdr.reserved = 0;
+	rp_hdr.flags = 0;
 
 	/* Copy data to rpmsg buffer. */
 	io = rvdev->shbuf_io;
-	status = metal_io_block_write(io, metal_io_virt_to_offset(io, buffer),
+	status = metal_io_block_write(io, metal_io_virt_to_offset(io, hdr),
 				      &rp_hdr, sizeof(rp_hdr));
 	RPMSG_ASSERT(status == sizeof(rp_hdr), "failed to write header\r\n");
 
-	status = metal_io_block_write(io,
-				      metal_io_virt_to_offset(io,
-							      RPMSG_LOCATE_DATA(buffer)),
-				      data, len);
-	RPMSG_ASSERT(status == len, "failed to write buffer\r\n");
-
 	metal_mutex_acquire(&rdev->lock);
 
+#ifndef VIRTIO_SLAVE_ONLY
+	if (rpmsg_virtio_get_role(rvdev) == RPMSG_MASTER)
+		buff_len = RPMSG_BUFFER_SIZE;
+	else
+#endif /*!VIRTIO_SLAVE_ONLY*/
+		buff_len = virtqueue_get_buffer_length(rvdev->svq, idx);
+
 	/* Enqueue buffer on virtqueue. */
-	status = rpmsg_virtio_enqueue_buffer(rvdev, buffer, buff_len, idx);
+	status = rpmsg_virtio_enqueue_buffer(rvdev, hdr, buff_len, idx);
 	RPMSG_ASSERT(status == VQUEUE_SUCCESS, "failed to enqueue buffer\r\n");
 	/* Let the other side know that there is a job to process. */
 	virtqueue_kick(rvdev->svq);
@@ -355,6 +389,50 @@
 	return len;
 }
 
+/**
+ * This function sends rpmsg "message" to remote device.
+ *
+ * @param rdev - pointer to rpmsg device
+ * @param src - source address of channel
+ * @param dst - destination address of channel
+ * @param data - data to transmit
+ * @param len - size of data
+ * @param wait - boolean, wait or not for buffer to become
+ *               available
+ *
+ * @return - size of data sent or negative value for failure.
+ *
+ */
+static int rpmsg_virtio_send_offchannel_raw(struct rpmsg_device *rdev,
+					    uint32_t src, uint32_t dst,
+					    const void *data,
+					    int len, int wait)
+{
+	struct rpmsg_virtio_device *rvdev;
+	struct metal_io_region *io;
+	uint32_t buff_len;
+	void *buffer;
+	int status;
+
+	/* Get the associated remote device for channel. */
+	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);
+
+	/* Get the payload buffer. */
+	buffer = rpmsg_virtio_get_tx_payload_buffer(rdev, &buff_len, wait);
+	if (!buffer)
+		return RPMSG_ERR_NO_BUFF;
+
+	/* Copy data to rpmsg buffer. */
+	if (len > (int)buff_len)
+		len = buff_len;
+	io = rvdev->shbuf_io;
+	status = metal_io_block_write(io, metal_io_virt_to_offset(io, buffer),
+				      data, len);
+	RPMSG_ASSERT(status == len, "failed to write buffer\r\n");
+
+	return rpmsg_virtio_send_offchannel_nocopy(rdev, src, dst, buffer, len);
+}
+
 /**
  * rpmsg_virtio_tx_callback
  *
@@ -396,6 +474,8 @@ static void rpmsg_virtio_rx_callback(struct virtqueue *vq)
 	metal_mutex_release(&rdev->lock);
 
 	while (rp_hdr) {
+		rp_hdr->reserved = idx;
+
 		/* Get the channel node from the remote device channels list. */
 		metal_mutex_acquire(&rdev->lock);
 		ept = rpmsg_get_ept_from_addr(rdev, rp_hdr->dst);
@@ -418,8 +498,11 @@
 		metal_mutex_acquire(&rdev->lock);
 
-		/* Return used buffers. */
-		rpmsg_virtio_return_buffer(rvdev, rp_hdr, len, idx);
+		/* Check whether callback wants to hold buffer */
+		if (!(rp_hdr->reserved & RPMSG_BUF_HELD)) {
+			/* No, return used buffers. */
+			rpmsg_virtio_return_buffer(rvdev, rp_hdr, len, idx);
+		}
 
 		rp_hdr = rpmsg_virtio_get_rx_buffer(rvdev, &len, &idx);
 		if (!rp_hdr) {
@@ -530,6 +613,10 @@ int rpmsg_init_vdev(struct rpmsg_virtio_device *rvdev,
 	rdev->ns_bind_cb = ns_bind_cb;
 	vdev->priv = rvdev;
 	rdev->ops.send_offchannel_raw = rpmsg_virtio_send_offchannel_raw;
+	rdev->ops.hold_rx_buffer = rpmsg_virtio_hold_rx_buffer;
+	rdev->ops.release_rx_buffer = rpmsg_virtio_release_rx_buffer;
+	rdev->ops.get_tx_payload_buffer = rpmsg_virtio_get_tx_payload_buffer;
+	rdev->ops.send_offchannel_nocopy = rpmsg_virtio_send_offchannel_nocopy;
 	role = rpmsg_virtio_get_role(rvdev);
 
 #ifndef VIRTIO_MASTER_ONLY
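
Not part of the patch, for review context only: a minimal sketch of the zero-copy TX path added above. The endpoint is assumed to be already created and announced; the helper name send_zero_copy() and its error handling are illustrative, while rpmsg_get_tx_payload_buffer(), rpmsg_send_nocopy() and the RPMSG_ERR_* codes come from the patched rpmsg.h.

#include <string.h>
#include <openamp/rpmsg.h>

/* Hypothetical helper: reserve a vring TX buffer, fill it in place, send it. */
int send_zero_copy(struct rpmsg_endpoint *ept, const void *msg, int msg_len)
{
	uint32_t buf_len;
	void *txbuf;

	/* Reserve a TX buffer directly from the vring; wait until one is free. */
	txbuf = rpmsg_get_tx_payload_buffer(ept, &buf_len, 1);
	if (!txbuf)
		return RPMSG_ERR_NO_BUFF;

	/*
	 * The caller must not overflow the reserved buffer. Note that this
	 * patch provides no call to hand an unused TX buffer back, so size
	 * checks should ideally happen before reserving it.
	 */
	if ((uint32_t)msg_len > buf_len)
		return RPMSG_ERR_BUFF_SIZE;

	/* Fill the shared-memory buffer in place; data cache coherency is the
	 * application's responsibility. */
	memcpy(txbuf, msg, msg_len);

	/* Hand the buffer over; it must not be touched again unless this fails. */
	return rpmsg_send_nocopy(ept, txbuf, msg_len);
}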
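
Similarly, a hedged sketch of the RX side: the endpoint callback keeps the vring buffer with rpmsg_hold_rx_buffer() instead of copying the payload, and a deferred-processing routine releases it later with rpmsg_release_rx_buffer(). The single-slot pending structure and the function names are hypothetical; only the hold/release calls come from this patch.

#include <openamp/rpmsg.h>

/* Hypothetical single-slot hand-off between the RX callback and the application. */
struct pending_msg {
	struct rpmsg_endpoint *ept;
	void *rxbuf;
	size_t len;
};

static struct pending_msg pending;

static int my_ept_cb(struct rpmsg_endpoint *ept, void *data, size_t len,
		     uint32_t src, void *priv)
{
	(void)src;
	(void)priv;

	/* Keep the vring RX buffer alive after this callback returns. */
	rpmsg_hold_rx_buffer(ept, data);
	pending.ept = ept;
	pending.rxbuf = data;
	pending.len = len;
	return RPMSG_SUCCESS;
}

/* Called later from the application's process context. */
static void process_pending(void)
{
	/* ... consume pending.len bytes at pending.rxbuf here ... */

	/* Return the held buffer to the vring once processing is done. */
	rpmsg_release_rx_buffer(pending.ept, pending.rxbuf);
}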