// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/mhi_ep.h>
10 size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
12 return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
15 static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
19 memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
21 return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
24 void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
26 ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
29 static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
31 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
32 struct device *dev = &mhi_cntrl->mhi_dev->dev;
33 struct mhi_ep_buf_info buf_info = {};
37 /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
38 if (ring->type == RING_TYPE_ER)
41 /* No need to cache the ring if write pointer is unmodified */
42 if (ring->wr_offset == end)
45 start = ring->wr_offset;
47 buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
48 buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
49 buf_info.dev_addr = &ring->ring_cache[start];
51 ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
55 buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
56 buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
57 buf_info.dev_addr = &ring->ring_cache[start];
59 ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
64 buf_info.host_addr = ring->rbase;
65 buf_info.dev_addr = &ring->ring_cache[0];
66 buf_info.size = end * sizeof(struct mhi_ring_element);
68 ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
74 dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
79 static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
84 wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
86 /* Cache the host ring till write offset */
87 ret = __mhi_ep_cache_ring(ring, wr_offset);
91 ring->wr_offset = wr_offset;
96 int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
100 wr_ptr = mhi_ep_mmio_get_db(ring);
102 return mhi_ep_cache_ring(ring, wr_ptr);
105 /* TODO: Support for adding multiple ring elements to the ring */
106 int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
108 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
109 struct device *dev = &mhi_cntrl->mhi_dev->dev;
110 struct mhi_ep_buf_info buf_info = {};
111 size_t old_offset = 0;
116 ret = mhi_ep_update_wr_offset(ring);
118 dev_err(dev, "Error updating write pointer\n");
122 if (ring->rd_offset < ring->wr_offset)
123 num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
125 num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
127 /* Check if there is space in ring for adding at least an element */
128 if (!num_free_elem) {
129 dev_err(dev, "No space left in the ring\n");
133 old_offset = ring->rd_offset;
134 mhi_ep_ring_inc_index(ring);
136 dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
138 /* Update rp in ring context */
139 rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
140 memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
142 buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
143 buf_info.dev_addr = el;
144 buf_info.size = sizeof(*el);
146 return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
149 void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
152 if (ring->type == RING_TYPE_CMD) {
153 ring->db_offset_h = EP_CRDB_HIGHER;
154 ring->db_offset_l = EP_CRDB_LOWER;
155 } else if (ring->type == RING_TYPE_CH) {
156 ring->db_offset_h = CHDB_HIGHER_n(id);
157 ring->db_offset_l = CHDB_LOWER_n(id);
160 ring->db_offset_h = ERDB_HIGHER_n(id);
161 ring->db_offset_l = ERDB_LOWER_n(id);
165 int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
166 union mhi_ep_ring_ctx *ctx)
168 struct device *dev = &mhi_cntrl->mhi_dev->dev;
172 ring->mhi_cntrl = mhi_cntrl;
173 ring->ring_ctx = ctx;
174 ring->ring_size = mhi_ep_ring_num_elems(ring);
175 memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
176 ring->rbase = le64_to_cpu(val);
178 if (ring->type == RING_TYPE_CH)
179 ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
181 if (ring->type == RING_TYPE_ER)
182 ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
184 /* During ring init, both rp and wp are equal */
185 memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
186 ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
187 ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
189 /* Allocate ring cache memory for holding the copy of host ring */
190 ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
191 if (!ring->ring_cache)
194 memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
195 ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
197 dev_err(dev, "Failed to cache ring\n");
198 kfree(ring->ring_cache);
202 ring->started = true;
207 void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
209 ring->started = false;
210 kfree(ring->ring_cache);
211 ring->ring_cache = NULL;