/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
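/*
 * Worked example (illustrative only, assuming I915_NUM_ENGINES == 5):
 * for __from->id == 0 and __to->id == 1,
 *
 *	GEN8_SEMAPHORE_OFFSET(0, 1) = (0 * 5 + 1) * 8 = 0x08
 *
 * and the waiter side resolves to the same qword, since
 * GEN8_WAIT_OFFSET(__ring, from) expands to
 * GEN8_SEMAPHORE_OFFSET(from, (__ring)->id): each (signaller, waiter)
 * pair owns exactly one qword in the semaphore page.
 */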
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}
#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
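/*
 * Usage sketch (hypothetical caller, modelled on the instdone readout in
 * intel_engine_cs.c; read_subslice_reg() is assumed from that file): visit
 * each enabled slice/subslice pair exactly once:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		instdone->sampler[slice][subslice] =
 *			read_subslice_reg(dev_priv, slice, subslice,
 *					  GEN7_SAMPLER_INSTDONE);
 */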
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};
struct i915_gem_context;
struct drm_i915_reg_table;
/*
 * we use a single page to load ctx workarounds so all of these
 * values are expressed in dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful if we want
 *    to have multiple batches at different offsets based on some criteria.
 *    It is not a requirement at the moment but provides an option for
 *    future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
struct drm_i915_gem_request;
struct intel_render_state;
/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

#define INTEL_ENGINE_CS_MAX_NAME 8
struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];
	enum intel_engine_id id;
	unsigned int uabi_id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1
	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;
	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);
	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);
	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32	wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64	signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;
	struct tasklet_struct irq_tasklet;
	struct i915_priolist default_priolist;
	bool no_priolist;
	struct execlist_port {
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, e) ((p) - (e)->execlist_port)
		GEM_DEBUG_DECL(u32 context_id);
	} execlist_port[2];
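	/*
	 * Usage sketch (assumes the execlists submission pattern used in
	 * intel_lrc.c): a request pointer is packed into a port together
	 * with its submission count in the low pointer bits, then
	 * recovered on completion:
	 *
	 *	port_set(port, port_pack(i915_gem_request_get(rq), count));
	 *	...
	 *	rq = port_unpack(port, &count);
	 */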
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_USING_CMD_PARSER	BIT(0)
#define I915_ENGINE_REQUIRES_CMD_PARSER	BIT(3)
	unsigned int flags;
	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we only do so when we are uncertain of the device state, we take
	 * a bit of extra paranoia to try and ensure that the HWS takes the
	 * value we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR		(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
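/*
 * Note (assumes MI_STORE_DWORD_INDEX_SHIFT == 2, as defined in i915_reg.h):
 * the *_INDEX values above are dword indices into the status page, and the
 * *_ADDR values are the matching byte offsets, e.g. 0x30 << 2 == 0xc0.
 */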
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);
static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
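/*
 * Typical emission pattern (an illustrative sketch of how callers pair
 * intel_ring_begin() with intel_ring_advance(); the dwords emitted here
 * are arbitrary):
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_STORE_DWORD_INDEX;
 *	*cs++ = I915_GEM_HWS_SCRATCH_ADDR;
 *	*cs++ = 0;
 *	*cs++ = MI_NOOP;
 *
 *	intel_ring_advance(req, cs);
 *
 * Exactly the reserved number of dwords must be written before
 * intel_ring_advance(), or the GEM_BUG_ON() above fires.
 */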
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;

	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);
	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD;
	 * it may have advanced since then, but in the worst case it is exactly
	 * ring->head, so we must never program RING_TAIL to advance into the
	 * same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}
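/*
 * Example (illustrative, with CACHELINE_BYTES == 64): if ring->head == 0x48,
 * programming tail == 0x40 trips the check (same 0x40 cacheline and
 * tail < head), while tail == 0x80 lands in the next cacheline and is fine.
 */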
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords == 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}
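/*
 * Waiter lifecycle sketch (an assumption, loosely following the pattern
 * used by the request-wait path): a client initialises an intel_wait for
 * its request, adds itself to the engine's waiter tree, sleeps until the
 * seqno advances, then removes itself:
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, rq);
 *	intel_engine_add_wait(engine, &wait);
 *	... sleep until woken, checking intel_wait_complete(&wait) ...
 *	intel_engine_remove_wait(engine, &wait);
 */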
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
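/*
 * Example (sketch; the flag choice is illustrative, using PIPE_CONTROL
 * bits from i915_reg.h): emit a 6-dword PIPE_CONTROL into a workaround
 * batch, assuming "batch" points at 6 free dwords:
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_FLUSH_L3 |
 *				       PIPE_CONTROL_GLOBAL_GTT_IVB,
 *				       0);
 */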
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
static inline bool
__intel_engine_can_store_dword(unsigned int gen, unsigned int class)
{
	if (gen <= 2)
		return false; /* uses physical not virtual addresses */

	if (gen == 6 && class == VIDEO_DECODE_CLASS)
		return false; /* b0rked */

	return true;
}

#endif /* _INTEL_RINGBUFFER_H_ */