/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 * - 1.14 - Update kfd_event_data
 */

#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 14

struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
        __u32 minor_version;    /* from KFD */
};

/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE              0x0
#define KFD_IOC_QUEUE_TYPE_SDMA                 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL          0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI            0x3

#define KFD_MAX_QUEUE_PERCENTAGE        100
#define KFD_MAX_QUEUE_PRIORITY          15

struct kfd_ioctl_create_queue_args {
        __u64 ring_base_address;        /* to KFD */
        __u64 write_pointer_address;    /* from KFD */
        __u64 read_pointer_address;     /* from KFD */
        __u64 doorbell_offset;          /* from KFD */

        __u32 ring_size;                /* to KFD */
        __u32 gpu_id;                   /* to KFD */
        __u32 queue_type;               /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
        __u32 queue_id;                 /* from KFD */

        __u64 eop_buffer_address;       /* to KFD */
        __u64 eop_buffer_size;          /* to KFD */
        __u64 ctx_save_restore_address; /* to KFD */
        __u32 ctx_save_restore_size;    /* to KFD */
        __u32 ctl_stack_size;           /* to KFD */
};

struct kfd_ioctl_destroy_queue_args {
        __u32 queue_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_update_queue_args {
        __u64 ring_base_address;        /* to KFD */

        __u32 queue_id;                 /* to KFD */
        __u32 ring_size;                /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_cu_mask;      /* to KFD */
        __u64 cu_mask_ptr;      /* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
        __u64 ctl_stack_address;        /* to KFD */
        __u32 ctl_stack_used_size;      /* from KFD */
        __u32 save_area_used_size;      /* from KFD */
        __u32 queue_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_available_memory_args {
        __u64 available;        /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_dbg_device_info_entry {
        __u64 exception_status;
        __u64 lds_base;
        __u64 lds_limit;
        __u64 scratch_base;
        __u64 scratch_limit;
        __u64 gpuvm_base;
        __u64 gpuvm_limit;
        __u32 gpu_id;
        __u32 location_id;
        __u32 vendor_id;
        __u32 device_id;
        __u32 revision_id;
        __u32 subsystem_vendor_id;
        __u32 subsystem_device_id;
        __u32 fw_version;
        __u32 gfx_target_version;
        __u32 simd_count;
        __u32 max_waves_per_simd;
        __u32 array_count;
        __u32 simd_arrays_per_engine;
        __u32 num_xcc;
        __u32 capability;
        __u32 debug_prop;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
        __u64 alternate_aperture_base;  /* to KFD */
        __u64 alternate_aperture_size;  /* to KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 default_policy;           /* to KFD */
        __u32 alternate_policy;         /* to KFD */
        __u32 pad;
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counters should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
        __u64 gpu_clock_counter;        /* from KFD */
        __u64 cpu_clock_counter;        /* from KFD */
        __u64 system_clock_counter;     /* from KFD */
        __u64 system_clock_freq;        /* from KFD */

        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_process_device_apertures {
        __u64 lds_base;         /* from KFD */
        __u64 lds_limit;        /* from KFD */
        __u64 scratch_base;     /* from KFD */
        __u64 scratch_limit;    /* from KFD */
        __u64 gpuvm_base;       /* from KFD */
        __u64 gpuvm_limit;      /* from KFD */
        __u32 gpu_id;           /* from KFD */
        __u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
        struct kfd_process_device_apertures
                        process_apertures[NUM_OF_SUPPORTED_GPUS]; /* from KFD */

        /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
        __u32 num_of_nodes;
        __u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
        /* User allocated. Pointer to struct kfd_process_device_apertures
         * filled in by Kernel
         */
        __u64 kfd_process_device_apertures_ptr;
        /* to KFD - indicates amount of memory present in
         *  kfd_process_device_apertures_ptr
         * from KFD - Number of entries filled by KFD.
         */
        __u32 num_of_nodes;
        __u32 pad;
};

#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE  128

struct kfd_ioctl_dbg_register_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

#define KFD_INVALID_FD 0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL                    0
#define KFD_IOC_EVENT_NODECHANGE                1
#define KFD_IOC_EVENT_DEVICESTATECHANGE         2
#define KFD_IOC_EVENT_HW_EXCEPTION              3
#define KFD_IOC_EVENT_SYSTEM_EVENT              4
#define KFD_IOC_EVENT_DEBUG_EVENT               5
#define KFD_IOC_EVENT_PROFILE_EVENT             6
#define KFD_IOC_EVENT_QUEUE_EVENT               7
#define KFD_IOC_EVENT_MEMORY                    8

#define KFD_IOC_WAIT_RESULT_COMPLETE            0
#define KFD_IOC_WAIT_RESULT_TIMEOUT             1
#define KFD_IOC_WAIT_RESULT_FAIL                2

#define KFD_SIGNAL_EVENT_LIMIT                  4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET        0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET       1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG       0
#define KFD_HW_EXCEPTION_ECC            1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS              0
#define KFD_MEM_ERR_SRAM_ECC            1
#define KFD_MEM_ERR_POISON_CONSUMED     2
#define KFD_MEM_ERR_GPU_HANG            3

struct kfd_ioctl_create_event_args {
        __u64 event_page_offset;        /* from KFD */
        __u32 event_trigger_data;       /* from KFD - signal events only */
        __u32 event_type;               /* to KFD */
        __u32 auto_reset;               /* to KFD */
        __u32 node_id;                  /* to KFD - only valid for certain
                                           event types */
        __u32 event_id;                 /* from KFD */
        __u32 event_slot_index;         /* from KFD */
};

struct kfd_ioctl_destroy_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_set_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_reset_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_memory_exception_failure {
        __u32 NotPresent;       /* Page not present or supervisor privilege */
        __u32 ReadOnly;         /* Write access to a read-only page */
        __u32 NoExecute;        /* Execute access to a page marked NX */
        __u32 imprecise;        /* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
        __u32 gpu_id;
        __u32 ErrorType; /* 0 = no RAS error,
                          * 1 = ECC,
                          * 2 = Link_SYNFLOOD (poison),
                          * 3 = GPU hang (not attributable to a specific cause),
                          * other values reserved
                          */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
        __u32 reset_type;
        __u32 reset_cause;
        __u32 memory_lost;
        __u32 gpu_id;
};

/* hsa signal event data */
struct kfd_hsa_signal_event_data {
        __u64 last_event_age;   /* to and from KFD */
};

struct kfd_event_data {
        union {
                /* From KFD */
                struct kfd_hsa_memory_exception_data memory_exception_data;
                struct kfd_hsa_hw_exception_data hw_exception_data;
                /* To and From KFD */
                struct kfd_hsa_signal_event_data signal_event_data;
        };
        __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_wait_events_args {
        __u64 events_ptr;       /* pointed to struct
                                   kfd_event_data array, to KFD */
        __u32 num_events;       /* to KFD */
        __u32 wait_for_all;     /* to KFD */
        __u32 timeout;          /* to KFD */
        __u32 wait_result;      /* from KFD */
};

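/*
 * Illustrative sketch, not part of the ABI: waiting on a single event from
 * user space. Assumes "kfd_fd" is an open /dev/kfd descriptor, "id" came
 * from AMDKFD_IOC_CREATE_EVENT (the ioctl numbers are defined further
 * below, and <sys/ioctl.h>/<stdint.h> are needed), and that @timeout is in
 * milliseconds (an assumption, not a guarantee of this header).
 *
 *      struct kfd_event_data data = { .event_id = id };
 *      struct kfd_ioctl_wait_events_args args = {
 *              .events_ptr = (__u64)(uintptr_t)&data,
 *              .num_events = 1,
 *              .wait_for_all = 1,
 *              .timeout = 1000,
 *      };
 *
 *      if (!ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args) &&
 *          args.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
 *              handle_signaled_event(&data);   - hypothetical helper
 */
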
struct kfd_ioctl_set_scratch_backing_va_args {
        __u64 va_addr;  /* to KFD */
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
        __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
        __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_macro_tile_configs;

        __u32 gpu_id;           /* to KFD */
        __u32 gb_addr_config;   /* from KFD */
        __u32 num_banks;        /* from KFD */
        __u32 num_ranks;        /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
};

struct kfd_ioctl_set_trap_handler_args {
        __u64 tba_addr;         /* to KFD */
        __u64 tma_addr;         /* to KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
        __u32 drm_fd;   /* to KFD */
        __u32 gpu_id;   /* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM            (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT             (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR         (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL        (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP      (1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE        (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE      (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC          (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE   (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM   (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT        (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED        (1 << 25)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr: virtual address of the memory to be allocated
 *           all later mappings on all GPUs will use this address
 * @size: size in bytes
 * @handle: buffer handle returned to user mode, used to refer to
 *          this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id: device identifier
 * @flags: memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
        __u64 va_addr;          /* to KFD */
        __u64 size;             /* to KFD */
        __u64 handle;           /* from KFD */
        __u64 mmap_offset;      /* to KFD (userptr), from KFD (mmap offset) */
        __u32 gpu_id;           /* to KFD */
        __u32 flags;
};

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
        __u64 handle;           /* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle: memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices: number of devices in the array
 * @n_success: number of devices mapped successfully
 *
 * @n_success returns information to the caller how many devices from
 * the start of the array have mapped the buffer successfully. It can
 * be passed into a subsequent retry call to skip those devices. For
 * the first call the caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};

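/*
 * Illustrative sketch, not part of the ABI: using @n_success to retry a
 * partially successful mapping. "kfd_fd", "handle", "gpu_ids" and "n_gpus"
 * are caller-provided assumptions; AMDKFD_IOC_MAP_MEMORY_TO_GPU is defined
 * further below and errno needs <errno.h>.
 *
 *      struct kfd_ioctl_map_memory_to_gpu_args args = {
 *              .handle = handle,
 *              .device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids,
 *              .n_devices = n_gpus,
 *              .n_success = 0,         - must be 0 on the first call
 *      };
 *      int err;
 *
 *      do {
 *              err = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args);
 *      } while (err && errno == EINTR);        - devices [0, n_success)
 *                                                are skipped on each retry
 */
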
/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};

/* Allocate GWS for specific queue
 *
 * @queue_id:  queue's id that GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated.
 *             only support contiguous GWS allocation
 */
struct kfd_ioctl_alloc_queue_gws_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_gws;          /* to KFD */
        __u32 first_gws;        /* from KFD */
        __u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
        __u32 metadata_size;    /* to KFD (space allocated by user)
                                 * from KFD (actual metadata size)
                                 */
        __u32 gpu_id;           /* from KFD */
        __u32 flags;            /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
        __u64 va_addr;          /* to KFD */
        __u64 handle;           /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
        __u64 handle;           /* to KFD */
        __u32 flags;            /* to KFD */
        __u32 dmabuf_fd;        /* from KFD */
};

/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
        KFD_SMI_EVENT_NONE = 0, /* not used */
        KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
        KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
        KFD_SMI_EVENT_GPU_PRE_RESET = 3,
        KFD_SMI_EVENT_GPU_POST_RESET = 4,
        KFD_SMI_EVENT_MIGRATE_START = 5,
        KFD_SMI_EVENT_MIGRATE_END = 6,
        KFD_SMI_EVENT_PAGE_FAULT_START = 7,
        KFD_SMI_EVENT_PAGE_FAULT_END = 8,
        KFD_SMI_EVENT_QUEUE_EVICTION = 9,
        KFD_SMI_EVENT_QUEUE_RESTORE = 10,
        KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,

        /*
         * Max event number, used as a flag bit to get events from all
         * processes. This requires superuser permission; otherwise no
         * events from any other process can be received. Without this
         * flag, events are received from the calling process only.
         */
        KFD_SMI_EVENT_ALL_PROCESS = 64
};

enum KFD_MIGRATE_TRIGGERS {
        KFD_MIGRATE_TRIGGER_PREFETCH,
        KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
        KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
        KFD_MIGRATE_TRIGGER_TTM_EVICTION
};

enum KFD_QUEUE_EVICTION_TRIGGERS {
        KFD_QUEUE_EVICTION_TRIGGER_SVM,
        KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
        KFD_QUEUE_EVICTION_TRIGGER_TTM,
        KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
        KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
        KFD_QUEUE_EVICTION_CRIU_RESTORE
};

enum KFD_SVM_UNMAP_TRIGGERS {
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
        KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE  96

struct kfd_ioctl_smi_events_args {
        __u32 gpuid;    /* to KFD */
        __u32 anon_fd;  /* from KFD */
};

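/*
 * Illustrative sketch, not part of the ABI: subscribing to SMI events.
 * This assumes the returned anon_fd accepts a write() of a __u64 event
 * mask built with KFD_SMI_EVENT_MASK_FROM_INDEX and delivers text records
 * of up to KFD_SMI_EVENT_MSG_SIZE bytes on read(); treat that anon_fd
 * protocol as an assumption, not something this header guarantees.
 *
 *      struct kfd_ioctl_smi_events_args args = { .gpuid = gpu_id };
 *      __u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *      char msg[KFD_SMI_EVENT_MSG_SIZE];
 *
 *      ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
 *      write(args.anon_fd, &mask, sizeof(mask));
 *      read(args.anon_fd, msg, sizeof(msg));   - blocks until an event fires
 */
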
/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
 *    all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
 * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
 */

enum kfd_criu_op {
        KFD_CRIU_OP_PROCESS_INFO,
        KFD_CRIU_OP_CHECKPOINT,
        KFD_CRIU_OP_UNPAUSE,
        KFD_CRIU_OP_RESTORE,
        KFD_CRIU_OP_RESUME,
};

/**
 * kfd_ioctl_criu_args - Arguments to perform CRIU operation
 * @devices:        [in/out] User pointer to memory location for devices information.
 *                  This is an array of type kfd_criu_device_bucket.
 * @bos:            [in/out] User pointer to memory location for BOs information
 *                  This is an array of type kfd_criu_bo_bucket.
 * @priv_data:      [in/out] User pointer to memory location for private data
 * @priv_data_size: [in/out] Size of priv_data in bytes
 * @num_devices:    [in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:        [in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:    [in/out] Number of objects used by process. Objects are opaque to
 *                  user application.
 * @pid:            [in/out] PID of the process being checkpointed
 * @op:             [in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */

struct kfd_ioctl_criu_args {
        __u64 devices;          /* Used during ops: CHECKPOINT, RESTORE */
        __u64 bos;              /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data;        /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data_size;   /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_devices;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_bos;          /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_objects;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 pid;              /* Used during ops: PROCESS_INFO, RESUME */
        __u32 op;
};

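/*
 * Illustrative sketch, not part of the ABI: the checkpoint-side sequence
 * described above (PROCESS_INFO, CHECKPOINT, UNPAUSE). Buffer allocation
 * and error handling are elided; "kfd_fd" and "target_pid" are
 * caller-provided assumptions.
 *
 *      struct kfd_ioctl_criu_args args = {
 *              .pid = target_pid,
 *              .op = KFD_CRIU_OP_PROCESS_INFO,
 *      };
 *
 *      ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *      - allocate buffers sized by args.num_devices, args.num_bos and
 *        args.priv_data_size, then point args.devices, args.bos and
 *        args.priv_data at them -
 *      args.op = KFD_CRIU_OP_CHECKPOINT;
 *      ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *      args.op = KFD_CRIU_OP_UNPAUSE;
 *      ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 */
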
struct kfd_criu_device_bucket {
        __u32 user_gpu_id;
        __u32 actual_gpu_id;
        __u32 drm_fd;
        __u32 pad;
};

struct kfd_criu_bo_bucket {
        __u64 addr;
        __u64 size;
        __u64 offset;
        __u64 restored_offset;  /* During restore, updated offset for BO */
        __u32 gpu_id;           /* This is the user_gpu_id */
        __u32 alloc_flags;
        __u32 dmabuf_fd;
        __u32 pad;
};

/* CRIU IOCTLs - END */
/**************************************************************************************************/

/* Register offset inside the remapped mmio page */
enum kfd_mmio_remap {
        KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
        KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
        KFD_IOCTL_SVM_OP_SET_ATTR,
        KFD_IOCTL_SVM_OP_GET_ATTR
};

/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * Below definitions are used for system memory or for leaving the preferred
 * location unspecified.
 */
enum kfd_ioctl_svm_location {
        KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
        KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};

/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
        KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
        KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
        KFD_IOCTL_SVM_ATTR_ACCESS,
        KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
        KFD_IOCTL_SVM_ATTR_NO_ACCESS,
        KFD_IOCTL_SVM_ATTR_SET_FLAGS,
        KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
        KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
        __u32 type;
        __u32 value;
};

/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means, a flag will be set in the
 * output, if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means, a flag will be set in the
 * output, if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
        __u64 start_addr;
        __u64 size;
        __u32 op;
        __u32 nattr;
        /* Variable length array of attributes */
        struct kfd_ioctl_svm_attribute attrs[];
};

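/*
 * Illustrative sketch, not part of the ABI: setting two attributes on a
 * range via the variable-length @attrs array. "kfd_fd", "addr", "size" and
 * "gpu_id" are caller-provided assumptions; AMDKFD_IOC_SVM is defined
 * further below, and malloc() needs <stdlib.h>.
 *
 *      __u32 nattr = 2;
 *      struct kfd_ioctl_svm_args *args =
 *              malloc(sizeof(*args) + nattr * sizeof(args->attrs[0]));
 *
 *      args->start_addr = addr;
 *      args->size = size;
 *      args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *      args->nattr = nattr;
 *      args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
 *      args->attrs[0].value = gpu_id;
 *      args->attrs[1].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *      args->attrs[1].value = KFD_IOCTL_SVM_FLAG_HOST_ACCESS;
 *      ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 */
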
/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may be compiled for the wrong mode. And GPUs
 * that cannot change to the requested mode will prevent the XNACK
 * mode from occurring. All GPUs used by the process must be in the
 * same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
        __s32 xnack_enabled;
};

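/*
 * Illustrative sketch, not part of the ABI: querying the current XNACK
 * mode without changing it, using the negative-input rule documented
 * above. "kfd_fd" and "current_mode" are caller-provided assumptions;
 * AMDKFD_IOC_SET_XNACK_MODE is defined further below.
 *
 *      struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };
 *
 *      if (!ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args))
 *              current_mode = args.xnack_enabled;      - 0 or positive
 */
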
/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
        KFD_DBG_TRAP_OVERRIDE_OR = 0,
        KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
        KFD_DBG_TRAP_MASK_FP_INVALID = 1,
        KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
        KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
        KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
        KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
        KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
        KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
        KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
        KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};

/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
        KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
};

/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
        EC_NONE = 0,
        /* per queue */
        EC_QUEUE_WAVE_ABORT = 1,
        EC_QUEUE_WAVE_TRAP = 2,
        EC_QUEUE_WAVE_MATH_ERROR = 3,
        EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
        EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
        EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
        EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
        EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
        EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
        EC_QUEUE_PACKET_RESERVED = 19,
        EC_QUEUE_PACKET_UNSUPPORTED = 20,
        EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
        EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
        EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
        EC_QUEUE_PREEMPTION_ERROR = 30,
        EC_QUEUE_NEW = 31,
        /* per device */
        EC_DEVICE_QUEUE_DELETE = 32,
        EC_DEVICE_MEMORY_VIOLATION = 33,
        EC_DEVICE_RAS_ERROR = 34,
        EC_DEVICE_FATAL_HALT = 35,
        EC_DEVICE_NEW = 36,
        /* per process */
        EC_PROCESS_RUNTIME = 48,
        EC_PROCESS_DEVICE_REMOVE = 49,
        EC_MAX
};

/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)      (1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE       (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |     \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |      \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |        \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |       \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |  \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |    \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |   \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |       \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) |      \
                                 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) |       \
                                 KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE      (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |  \
                                 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |     \
                                 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |    \
                                 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |      \
                                 KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS     (KFD_EC_MASK(EC_PROCESS_RUNTIME) |      \
                                 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET      (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |    \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |   \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |       \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)                                 \
        (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)                                \
        (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)                               \
        (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode)                                \
        (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))

/* Runtime enable states */
enum kfd_dbg_runtime_state {
        DEBUG_RUNTIME_STATE_DISABLED = 0,
        DEBUG_RUNTIME_STATE_ENABLED = 1,
        DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
        DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
        __u64 r_debug;
        __u32 runtime_state;
        __u32 ttmp_setup;
};

/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK     1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK  2

/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
 * @mode_mask - mask to set mode
 *      KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
 *      KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
 * @capabilities_mask - mask to notify runtime on what KFD supports
 *
 * Return - 0 on SUCCESS.
 *        - EBUSY if runtime enable call already pending.
 *        - EEXIST if user queues already active prior to call.
 *          If process is debug enabled, runtime enable will enable debug devices and
 *          wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
 *          to unblock - see kfd_ioctl_dbg_trap_args.
 */
struct kfd_ioctl_runtime_enable_args {
        __u64 r_debug;
        __u32 mode_mask;
        __u32 capabilities_mask;
};

/* Queue information */
struct kfd_queue_snapshot_entry {
        __u64 exception_status;
        __u64 ring_base_address;
        __u64 write_pointer_address;
        __u64 read_pointer_address;
        __u64 ctx_save_restore_address;
        __u32 queue_id;
        __u32 gpu_id;
        __u32 ring_size;
        __u32 queue_type;
        __u32 ctx_save_restore_area_size;
        __u32 reserved;
};

/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT         30
#define KFD_DBG_QUEUE_INVALID_BIT       31
#define KFD_DBG_QUEUE_ERROR_MASK        (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK      (1 << KFD_DBG_QUEUE_INVALID_BIT)

/* Context save area header information */
struct kfd_context_save_area_header {
        struct {
                __u32 control_stack_offset;
                __u32 control_stack_size;
                __u32 wave_state_offset;
                __u32 wave_state_size;
        } wave_state;
        __u32 debug_offset;
        __u32 debug_size;
        __u64 err_payload_addr;
        __u32 err_event_id;
        __u32 reserved1;
};

/**
 * Debug operations
 *
 * For specifics on usage and return values, see documentation per operation
 * below. Otherwise, generic error returns apply:
 * - ESRCH if the process to debug does not exist.
 *
 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *   KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *   Also returns this error if GPU hardware scheduling is not supported.
 *
 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *   PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *   clean up of debug mode as long as process is debug enabled.
 *
 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *   AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *   is in a fatal state.
 */

enum kfd_dbg_trap_operations {
        KFD_IOC_DBG_TRAP_ENABLE = 0,
        KFD_IOC_DBG_TRAP_DISABLE = 1,
        KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
        KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,            /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,             /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,    /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
        KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
        KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
        KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
        KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};

/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
 * kfd_ioctl_dbg_trap_args to disable debug session.
 *
 * @exception_mask (IN)     - exceptions to raise to the debugger
 * @rinfo_ptr      (IN)     - pointer to runtime info buffer (see kfd_runtime_info)
 * @rinfo_size     (IN/OUT) - size of runtime info buffer in bytes
 * @dbg_fd         (IN)     - fd the KFD will notify the debugger with of raised
 *                            exceptions set in exception_mask.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 *          Size of kfd_runtime saved by the KFD returned to @rinfo_size.
 *        - EBADF if KFD cannot get a reference to dbg_fd.
 *        - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *        - EINVAL if target process is already debug enabled.
 */
struct kfd_ioctl_dbg_trap_enable_args {
        __u64 exception_mask;
        __u64 rinfo_ptr;
        __u32 rinfo_size;
        __u32 dbg_fd;
};

/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 * Raises exceptions to runtime.
 *
 * @exception_mask (IN) - exceptions to raise to runtime
 * @gpu_id         (IN) - target device id
 * @queue_id       (IN) - target queue id
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - ENODEV if gpu_id not found.
 *          If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
 *          AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
 *          All other exceptions are raised to runtime through err_payload_addr.
 *          See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 * Set new exceptions to be raised to the debugger.
 *
 * @exception_mask (IN) - new exceptions to raise to the debugger
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
        __u64 exception_mask;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 * Enable HW exceptions to raise trap.
 *
 * @override_mode        (IN)     - see kfd_dbg_trap_override_mode
 * @enable_mask          (IN/OUT) - reference kfd_dbg_trap_mask.
 *      IN is the override modes requested to be enabled.
 *      OUT is referenced in Return below.
 * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *      IN is the override modes requested for support check.
 *      OUT is referenced in Return below.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Previous enablement is returned in @enable_mask.
 *          Actual override support is returned in @support_request_mask.
 *        - EINVAL if override mode is not supported.
 *        - EACCES if trap support requested is not actually supported.
 *          i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
 *          Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
        __u32 override_mode;
        __u32 enable_mask;
        __u32 support_request_mask;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 * Set wave launch mode.
 *
 * @mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
        __u32 launch_mode;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 * Suspend queues.
 *
 * @exception_mask  (IN) - raised exceptions to clear
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *                         to suspend
 * @num_queues      (IN) - number of queues to suspend in @queue_array_ptr
 * @grace_period    (IN) - wave time allowance before preemption
 *                         per 1K GPU clock cycle unit
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Destruction of a suspended queue is blocked until the queue is
 * resumed. This allows the debugger to access queue information and
 * its context save area without running into a race condition on
 * queue destruction.
 * Automatically copies per queue context save area header information
 * into the save area base
 * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 * Return - Number of queues suspended on SUCCESS.
 *          KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 *          for each queue id in @queue_array_ptr array reports unsuccessful
 *          suspend reason:
 *          KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *          KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *          is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
        __u64 exception_mask;
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 grace_period;
};

/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 * Resume queues.
 *
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *                         to resume
 * @num_queues      (IN) - number of queues to resume in @queue_array_ptr
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - Number of queues resumed on SUCCESS.
 *          KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
 *          for each queue id in @queue_array_ptr array reports unsuccessful
 *          resume reason:
 *          KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *          KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 * Sets address watch for device.
 *
 * @address (IN)  - watch address to set
 * @mode    (IN)  - see kfd_dbg_trap_address_watch_mode
 * @mask    (IN)  - watch address mask
 * @gpu_id  (IN)  - target gpu to set watch point
 * @id      (OUT) - watch id allocated
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Allocated watch ID returned to @id.
 *        - ENODEV if gpu_id not found.
 *        - ENOMEM if watch IDs cannot be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
        __u64 address;
        __u32 mode;
        __u32 mask;
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 * Clear address watch for device.
 *
 * @gpu_id (IN) - target device to clear watch point
 * @id     (IN) - allocated watch id to clear
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - ENODEV if gpu_id not found.
 *        - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 * Sets flags for wave behaviour.
 *
 * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
        __u32 flags;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 * Find one or more raised exceptions. This function can return multiple
 * exceptions from a single queue or a single device with one call. To find
 * all raised exceptions, this function must be called repeatedly until it
 * returns -EAGAIN. Returned exceptions can optionally be cleared by
 * setting the corresponding bit in the @exception_mask input parameter.
 * However, clearing an exception prevents retrieving further information
 * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 * @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
 * @gpu_id         (OUT)    - gpu id of exceptions raised
 * @queue_id       (OUT)    - queue id of exceptions raised
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on raised exception found
 *          Raised exceptions found are returned in @exception_mask
 *          with reported source id returned in @gpu_id or @queue_id.
 *        - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_query_exception_info_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
 * Get additional info on raised exception.
 *
 * @info_ptr        (IN)     - pointer to exception info buffer to copy to
 * @info_size       (IN/OUT) - exception info buffer size (bytes)
 * @source_id       (IN)     - target gpu or queue id
 * @exception_code  (IN)     - target exception
 * @clear_exception (IN)     - clear raised @exception_code exception
 *                             (0 = false, 1 = true)
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
 *          bytes of memory exception data to @info_ptr.
 *          If @exception_code is EC_PROCESS_RUNTIME, copy saved
 *          kfd_runtime_info to @info_ptr.
 *          Actual required @info_ptr size (bytes) is returned in @info_size.
 */
struct kfd_ioctl_dbg_trap_query_exception_info_args {
        __u64 info_ptr;
        __u32 info_size;
        __u32 source_id;
        __u32 exception_code;
        __u32 clear_exception;
};

/**
 * kfd_ioctl_dbg_trap_queue_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 * Get queue information.
 *
 * @exception_mask   (IN)     - exceptions raised to clear
 * @snapshot_buf_ptr (IN)     - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
 * @num_queues       (IN/OUT) - number of queue snapshot entries
 *      The debugger specifies the size of the array allocated in @num_queues.
 *      KFD returns the number of queues that actually existed. If this is
 *      larger than the size specified by the debugger, KFD will not overflow
 *      the array allocated by the debugger.
 *
 * @entry_size       (IN/OUT) - size per entry in bytes
 *      The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
 *      @entry_size. KFD returns the number of bytes actually populated per
 *      entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine
 *      which fields in struct kfd_queue_snapshot_entry are valid. This allows
 *      growing the ABI in a backwards compatible manner.
 *      Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *      event that it's larger than actual kfd_queue_snapshot_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
 *          into @snapshot_buf_ptr if @num_queues(IN) > 0.
 *          Otherwise return @num_queues(OUT) queue snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_queues;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_device_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 * Get device information.
 *
 * @exception_mask   (IN)     - exceptions raised to clear
 * @snapshot_buf_ptr (IN)     - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
 * @num_devices      (IN/OUT) - number of debug devices to snapshot
 *      The debugger specifies the size of the array allocated in @num_devices.
 *      KFD returns the number of devices that actually existed. If this is
 *      larger than the size specified by the debugger, KFD will not overflow
 *      the array allocated by the debugger.
 *
 * @entry_size       (IN/OUT) - size per entry in bytes
 *      The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
 *      @entry_size. KFD returns the number of bytes actually populated. The
 *      debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
 *      in struct kfd_dbg_device_info_entry are valid. This allows growing the
 *      ABI in a backwards compatible manner.
 *      Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *      event that it's larger than actual kfd_dbg_device_info_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
 *          into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *          Otherwise return @num_devices(OUT) device snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_devices;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_args
 *
 * Arguments to debug target process.
 *
 * @pid - target process to debug
 * @op  - debug operation (see kfd_dbg_trap_operations)
 *
 * @op determines which union struct args to use.
 * Refer to kernel docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
        __u32 pid;
        __u32 op;

        union {
                struct kfd_ioctl_dbg_trap_enable_args enable;
                struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
                struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
                struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
                struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
                struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
                struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
                struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
                struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
                struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
                struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
                struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
                struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
                struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
        };
};

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)                   _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)            _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)            _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)           _IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION                  \
                AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

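/*
 * Illustrative sketch, not part of the ABI: the minimal user-space
 * handshake with the driver. The /dev/kfd node and the <fcntl.h>,
 * <stdio.h> and <sys/ioctl.h> includes are user-space details assumed
 * here, not mandated by this header.
 *
 *      int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *      struct kfd_ioctl_get_version_args ver = {0};
 *
 *      if (kfd_fd >= 0 && !ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &ver))
 *              printf("KFD ioctl version %u.%u\n",
 *                     ver.major_version, ver.minor_version);
 */
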
#define AMDKFD_IOC_CREATE_QUEUE                 \
                AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE                \
                AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY            \
                AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS           \
                AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES        \
                AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE                 \
                AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT                 \
                AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT                \
                AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT                    \
                AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT                  \
                AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS                  \
                AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED      \
                AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED    \
                AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
                AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED  \
                AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA       \
                AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG              \
                AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER             \
                AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW    \
                AMDKFD_IOWR(0x14,               \
                        struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM                   \
                AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU          \
                AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU           \
                AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU            \
                AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU        \
                AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK                  \
                AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE         \
                AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO              \
                AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF                \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS              \
                AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS                   \
                AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM  AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE               \
                AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP                      \
                AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY             \
                AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF                \
                AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE               \
                AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP                     \
                AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)

#define AMDKFD_COMMAND_START            0x01
#define AMDKFD_COMMAND_END              0x27

#endif