4 * Copyright(c) 2015, 2016 Intel Corporation.
6 * This file is provided under a dual BSD/GPLv2 license. When using or
7 * redistributing this file, you may do so under either license.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions
26 * - Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * - Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in
30 * the documentation and/or other materials provided with the
32 * - Neither the name of Intel Corporation nor the names of its
33 * contributors may be used to endorse or promote products derived
34 * from this software without specific prior written permission.
36 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
37 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
38 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
39 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
40 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
41 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
42 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
43 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
44 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
45 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
46 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
50 #include <linux/list.h>
51 #include <linux/workqueue.h>
52 #include <linux/sched.h>
54 #include "sdma_txreq.h"
/**
 * typedef (*restart_t)() - restart callback
 * @work: pointer to work structure
 */
typedef void (*restart_t)(struct work_struct *work);
65 * struct iowait - linkage for delayed progress/waiting
66 * @list: used to add/insert into QP/PQ wait lists
67 * @tx_head: overflow list of sdma_txreq's
68 * @sleep: no space callback
69 * @wakeup: space callback wakeup
70 * @sdma_drained: sdma count drained
71 * @iowork: workqueue overhead
72 * @wait_dma: wait for sdma_busy == 0
73 * @wait_pio: wait for pio_busy == 0
74 * @sdma_busy: # of packets in flight
75 * @count: total number of descriptors in tx_head'ed list
76 * @tx_limit: limit for overflow queuing
77 * @tx_count: number of tx entry's in tx_head'ed list
79 * This is to be embedded in user's state structure
82 * The sleep and wakeup members are a
83 * bit misnamed. They do not strictly
84 * speaking sleep or wake up, but they
85 * are callbacks for the ULP to implement
86 * what ever queuing/dequeuing of
87 * the embedded iowait and its containing struct
88 * when a resource shortage like SDMA ring space is seen.
90 * Both potentially have locks help
91 * so sleeping is not allowed.
93 * The wait_dma member along with the iow
97 struct list_head list;
98 struct list_head tx_head;
100 struct sdma_engine *sde,
102 struct sdma_txreq *tx,
104 void (*wakeup)(struct iowait *wait, int reason);
105 void (*sdma_drained)(struct iowait *wait);
106 struct work_struct iowork;
107 wait_queue_head_t wait_dma;
108 wait_queue_head_t wait_pio;
116 #define SDMA_AVAIL_REASON 0
119 * iowait_init() - initialize wait structure
120 * @wait: wait struct to initialize
121 * @tx_limit: limit for overflow queuing
122 * @func: restart function for workqueue
123 * @sleep: sleep function for no space
124 * @resume: wakeup function for no space
126 * This function initializes the iowait
127 * structure embedded in the QP or PQ.
131 static inline void iowait_init(
134 void (*func)(struct work_struct *work),
136 struct sdma_engine *sde,
138 struct sdma_txreq *tx,
140 void (*wakeup)(struct iowait *wait, int reason),
141 void (*sdma_drained)(struct iowait *wait))
144 INIT_LIST_HEAD(&wait->list);
145 INIT_LIST_HEAD(&wait->tx_head);
146 INIT_WORK(&wait->iowork, func);
147 init_waitqueue_head(&wait->wait_dma);
148 init_waitqueue_head(&wait->wait_pio);
149 atomic_set(&wait->sdma_busy, 0);
150 atomic_set(&wait->pio_busy, 0);
151 wait->tx_limit = tx_limit;
153 wait->wakeup = wakeup;
154 wait->sdma_drained = sdma_drained;
158 * iowait_schedule() - initialize wait structure
159 * @wait: wait struct to schedule
160 * @wq: workqueue for schedule
163 static inline void iowait_schedule(
165 struct workqueue_struct *wq,
168 queue_work_on(cpu, wq, &wait->iowork);
172 * iowait_sdma_drain() - wait for DMAs to drain
174 * @wait: iowait structure
176 * This will delay until the iowait sdmas have
179 static inline void iowait_sdma_drain(struct iowait *wait)
181 wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
185 * iowait_sdma_pending() - return sdma pending count
187 * @wait: iowait structure
190 static inline int iowait_sdma_pending(struct iowait *wait)
192 return atomic_read(&wait->sdma_busy);
196 * iowait_sdma_inc - note sdma io pending
197 * @wait: iowait structure
199 static inline void iowait_sdma_inc(struct iowait *wait)
201 atomic_inc(&wait->sdma_busy);
205 * iowait_sdma_add - add count to pending
206 * @wait: iowait structure
208 static inline void iowait_sdma_add(struct iowait *wait, int count)
210 atomic_add(count, &wait->sdma_busy);
214 * iowait_sdma_dec - note sdma complete
215 * @wait: iowait structure
217 static inline int iowait_sdma_dec(struct iowait *wait)
219 return atomic_dec_and_test(&wait->sdma_busy);
223 * iowait_pio_drain() - wait for pios to drain
225 * @wait: iowait structure
227 * This will delay until the iowait pios have
230 static inline void iowait_pio_drain(struct iowait *wait)
232 wait_event_timeout(wait->wait_pio,
233 !atomic_read(&wait->pio_busy),
238 * iowait_pio_pending() - return pio pending count
240 * @wait: iowait structure
243 static inline int iowait_pio_pending(struct iowait *wait)
245 return atomic_read(&wait->pio_busy);
249 * iowait_pio_inc - note pio pending
250 * @wait: iowait structure
252 static inline void iowait_pio_inc(struct iowait *wait)
254 atomic_inc(&wait->pio_busy);
258 * iowait_sdma_dec - note pio complete
259 * @wait: iowait structure
261 static inline int iowait_pio_dec(struct iowait *wait)
263 return atomic_dec_and_test(&wait->pio_busy);
267 * iowait_drain_wakeup() - trigger iowait_drain() waiter
269 * @wait: iowait structure
271 * This will trigger any waiters.
273 static inline void iowait_drain_wakeup(struct iowait *wait)
275 wake_up(&wait->wait_dma);
276 wake_up(&wait->wait_pio);
277 if (wait->sdma_drained)
278 wait->sdma_drained(wait);
282 * iowait_get_txhead() - get packet off of iowait list
284 * @wait wait struture
286 static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
288 struct sdma_txreq *tx = NULL;
290 if (!list_empty(&wait->tx_head)) {
291 tx = list_first_entry(
295 list_del_init(&tx->list);