// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/pci.h>
#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)
static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return SVGA_IRQFLAG_REG_FENCE_GOAL;

	return SVGA_IRQFLAG_FENCE_GOAL;
}
/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * The function is guaranteed to run at least once after the
 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	irqreturn_t ret = IRQ_NONE;

	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
			       dev_priv->irqthread_pending)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
		ret = IRQ_HANDLED;
	}

	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
			       dev_priv->irqthread_pending)) {
		vmw_cmdbuf_irqthread(dev_priv->cman);
		ret = IRQ_HANDLED;
	}

	return ret;
}
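/*
 * Flow sketch (illustrative summary of the handler pair): vmw_irq_handler()
 * below runs in hard-irq context, sets a bit in dev_priv->irqthread_pending
 * with test_and_set_bit() and returns IRQ_WAKE_THREAD; the kernel then runs
 * vmw_thread_fn() above in process context, which consumes the bit with
 * test_and_clear_bit() before doing the slower fence/cmdbuf work, so each
 * wakeup is handled exactly once even if several interrupts arrive while the
 * thread is still pending.
 */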
/**
 * vmw_irq_handler: irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;
	irqreturn_t ret = IRQ_HANDLED;

	status = vmw_irq_status_read(dev_priv);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		vmw_irq_status_write(dev_priv, status);

	if (!masked_status)
		return IRQ_NONE;

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			      vmw_irqflag_fence_goal(dev_priv))) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			      SVGA_IRQFLAG_ERROR)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
			      dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	return ret;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv)
{
	uint32_t seqno = vmw_fence_read(dev_priv);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_fences_update(dev_priv->fman);
	}
}
bool vmw_seqno_passed(struct vmw_private *dev_priv, uint32_t seqno)
{
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	vmw_update_seqno(dev_priv);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Then check if the seqno is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */
	ret = ((atomic_read(&dev_priv->marker_seq) - seqno) > VMW_FENCE_WRAP);

	return ret;
}
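/*
 * Wrap-around example (illustrative values): the comparisons above rely on
 * unsigned 32-bit arithmetic. With last_read_seqno == 0x00000002 and
 * seqno == 0xfffffffe, the difference 0x00000002 - 0xfffffffe wraps to
 * 0x00000004, which is below VMW_FENCE_WRAP, so a seqno emitted just before
 * the counter wrapped is still reported as passed. Only differences larger
 * than VMW_FENCE_WRAP are treated as not yet signaled (or, when compared
 * against marker_seq, as stale).
 */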
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy, bool fifo_idle,
		      uint32_t seqno, bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	bool fifo_down = false;
	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle : &vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */
	if (fifo_idle) {
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10 * HZ);
			if (ret)
				goto out_err;
		} else if (fifo_state) {
			down_read(&fifo_state->rwsem);
			fifo_down = true;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
}
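/*
 * Call sketch (hypothetical values, not taken from a real caller): poll
 * lazily for a specific seqno for up to three seconds, allowing signals to
 * interrupt the wait:
 *
 *	ret = vmw_fallback_wait(dev_priv, true, false, seqno, true, 3 * HZ);
 *	if (ret == -ERESTARTSYS)
 *		return ret;	(interrupted by a signal, caller may retry)
 */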
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
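/*
 * Usage sketch (illustrative; the vmw_seqno_waiter_* helpers below are the
 * real wrappers): a waiter unmasks the interrupt only while someone is
 * actually sleeping on it, pairing add and remove around the wait:
 *
 *	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
 *			       &dev_priv->fence_queue_waiters);
 *	(void) wait_event_timeout(dev_priv->fence_queue,
 *				  vmw_seqno_passed(dev_priv, seqno), timeout);
 *	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
 *				  &dev_priv->fence_queue_waiters);
 */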
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
			       &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				  &dev_priv->goal_queue_waiters);
}
static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);
}
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	uint32_t status;
	u32 i;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);

	for (i = 0; i < dev_priv->num_irq_vectors; ++i)
		free_irq(dev_priv->irqs[i], dev);

	pci_free_irq_vectors(pdev);
	dev_priv->num_irq_vectors = 0;
}
/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev_priv:  Pointer to the vmw_private device.
 * Return:  Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct vmw_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	int nvec;
	int i = 0;

	BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
	BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));

	nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
				     PCI_IRQ_ALL_TYPES);
	if (nvec <= 0) {
		drm_err(&dev_priv->drm,
			"IRQ's are unavailable, nvec: %d\n", nvec);
		ret = nvec;
		goto done;
	}

	vmw_irq_preinstall(dev);

	for (i = 0; i < nvec; ++i) {
		ret = pci_irq_vector(pdev, i);
		if (ret < 0) {
			drm_err(&dev_priv->drm,
				"failed getting irq vector: %d\n", ret);
			goto done;
		}
		dev_priv->irqs[i] = ret;

		ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler,
					   vmw_thread_fn, IRQF_SHARED,
					   VMWGFX_DRIVER_NAME, dev);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq(%d): %d\n",
				dev_priv->irqs[i], ret);
			goto done;
		}
	}

done:
	dev_priv->num_irq_vectors = i;
	return ret;
}