// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"
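
/*
 * Interrupt handler for the request channel: consumes responses published
 * by the backend on the shared ring and completes the waiting requester.
 */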
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	virt_rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
		case XENSND_OP_CLOSE:
		case XENSND_OP_READ:
		case XENSND_OP_WRITE:
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;
		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
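
/*
 * Interrupt handler for the event channel: delivers events sent by the
 * backend, e.g. current playback/capture position updates, to ALSA.
 */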
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure the in_cons update is visible to the backend. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
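
/* Push the queued request to the shared ring and kick the backend if needed. */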
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}
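
/*
 * Tear down a single channel: wake up pending waiters, unbind the IRQ,
 * free the event channel and release the granted shared page.
 */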
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	unsigned long page = 0;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release everyone who still waits for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	if (channel->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(channel->gref, 0, page);
	else
		free_page(page);

	memset(channel, 0, sizeof(*channel));
}
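
/* Free both channels (request and event) of every allocated pair. */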
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}
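
/*
 * Allocate a single channel: grant a shared page to the backend, allocate
 * an event channel and bind its interrupt to the type-specific handler.
 */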
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	channel->gref = GRANT_INVALID_REF;
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			channel->u.req.ring.sring = NULL;
			goto fail;
		}

		handler = evtchnl_interrupt_req;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0)
			goto fail;

		channel->u.evt.page = (struct xensnd_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}

	channel->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	if (page)
		free_page(page);
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}
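
/*
 * Create a request/event channel pair for each configured playback and
 * capture stream; each stream's index selects its pair in evt_pairs.
 */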
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}
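
/* Publish a channel's ring reference and event channel port in XenStore. */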
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write the channel's ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write the channel's event channel port. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}
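
/*
 * Publish all ring references and event channel ports within a single
 * XenStore transaction, retrying if the transaction ends with -EAGAIN.
 */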
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}
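
/* Set both channels of a pair (dis)connected under their ring I/O locks. */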
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}
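
/* Reset the expected incoming event IDs of both channels in a pair. */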
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}