// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "gaudiP.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v1_1.h"
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_fw_if.h"
#include "../include/gaudi/gaudi_reg_map.h"
#include "../include/gaudi/gaudi_async_ids_map_extended.h"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/genalloc.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
/*
 * Gaudi security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers
 *        - MMU
 *
 * 2. DDR is protected by:
 *        - Range registers (protect the first 512MB)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * MMU is always enabled.
 *
 * QMAN DMA channels 0,1,5 (PCI DMAN):
 *     - DMA is not secured.
 *     - PQ and CQ are secured.
 *     - CP is secured: The driver needs to parse CB but WREG should be allowed
 *                      because of TDMA (tensor DMA). Hence, WREG is never
 *                      secured.
 *
 * When the driver needs to use DMA it will check that Gaudi is idle, set DMA
 * channel 0 to be secured, execute the DMA and change it back to not secured.
 * Currently, the driver doesn't use the DMA while there are compute jobs
 * running.
 *
 * The current use cases for the driver to use the DMA are:
 *     - Clear SRAM on context switch (happens on context switch when device is
 *       idle)
 *     - MMU page tables area clear (happens on init)
 *
 * QMAN DMA 2-4,6,7, TPC, MME, NIC:
 * PQ is secured and is located on the Host (HBM CON TPC3 bug)
 * CQ, CP and the engine are not secured
 *
 */
#define GAUDI_BOOT_FIT_FILE	"/*(DEBLOBBED)*/"
#define GAUDI_LINUX_FW_FILE	"/*(DEBLOBBED)*/"
#define GAUDI_TPC_FW_FILE	"/*(DEBLOBBED)*/"

#define GAUDI_DMA_POOL_BLK_SIZE		0x100 /* 256 bytes */

#define GAUDI_RESET_TIMEOUT_MSEC	1000		/* 1000ms */
#define GAUDI_RESET_WAIT_MSEC		1		/* 1ms */
#define GAUDI_CPU_RESET_WAIT_MSEC	200		/* 200ms */
#define GAUDI_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */

#define GAUDI_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC	20000		/* 20s */
#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC	1000000		/* 1s */
#define GAUDI_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC	1000000		/* 1s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC	4000000		/* 4s */

#define GAUDI_QMAN0_FENCE_VAL	0x72E91AB9

#define GAUDI_MAX_STRING_LEN	20

#define GAUDI_CB_POOL_CB_CNT	512
#define GAUDI_CB_POOL_CB_SIZE	0x20000 /* 128KB */

#define GAUDI_ALLOC_CPU_MEM_RETRY_CNT	3

#define GAUDI_NUM_OF_TPC_INTR_CAUSE	20

#define GAUDI_NUM_OF_QM_ERR_CAUSE	16

#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE	3

#define GAUDI_ARB_WDT_TIMEOUT	0x1000000

#define GAUDI_CLK_GATE_DEBUGFS_MASK	(\
		BIT(GAUDI_ENGINE_ID_MME_0) |\
		BIT(GAUDI_ENGINE_ID_MME_2) |\
		GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
		"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
		"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
		"gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3",
		"gaudi cpu eq"
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
	[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
	[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
	[GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
	[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
	[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
	[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
	[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
	[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
};
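
/*
 * Only the external (PCI DMA) queues post completions back to the host,
 * so the twelve completion queues map onto the four streams of DMA
 * channels 0, 1 and 5.
 */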
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
	[0] = GAUDI_QUEUE_ID_DMA_0_0,
	[1] = GAUDI_QUEUE_ID_DMA_0_1,
	[2] = GAUDI_QUEUE_ID_DMA_0_2,
	[3] = GAUDI_QUEUE_ID_DMA_0_3,
	[4] = GAUDI_QUEUE_ID_DMA_1_0,
	[5] = GAUDI_QUEUE_ID_DMA_1_1,
	[6] = GAUDI_QUEUE_ID_DMA_1_2,
	[7] = GAUDI_QUEUE_ID_DMA_1_3,
	[8] = GAUDI_QUEUE_ID_DMA_5_0,
	[9] = GAUDI_QUEUE_ID_DMA_5_1,
	[10] = GAUDI_QUEUE_ID_DMA_5_2,
	[11] = GAUDI_QUEUE_ID_DMA_5_3
};
static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_REPEAT]		= sizeof(struct packet_repeat),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop),
	[PACKET_ARB_POINT]	= sizeof(struct packet_arb_point),
	[PACKET_WAIT]		= sizeof(struct packet_wait),
	[PACKET_LOAD_AND_EXE]	= sizeof(struct packet_load_and_exe)
};
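
/*
 * Opcode whitelist used when validating packets in a command buffer;
 * any packet ID outside this set is rejected.
 */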
static inline bool validate_packet_id(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_WREG_BULK:
	case PACKET_MSG_LONG:
	case PACKET_MSG_SHORT:
	case PACKET_CP_DMA:
	case PACKET_REPEAT:
	case PACKET_MSG_PROT:
	case PACKET_FENCE:
	case PACKET_LIN_DMA:
	case PACKET_NOP:
	case PACKET_STOP:
	case PACKET_ARB_POINT:
	case PACKET_WAIT:
	case PACKET_LOAD_AND_EXE:
		return true;
	default:
		return false;
	}
}
static const char * const
	gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
	"tpc_address_exceed_slm",
	"tpc_div_by_0",
	"tpc_spu_mac_overflow",
	"tpc_spu_addsub_overflow",
	"tpc_spu_abs_overflow",
	"tpc_spu_fp_dst_nan_inf",
	"tpc_spu_fp_dst_denorm",
	"tpc_vpu_mac_overflow",
	"tpc_vpu_addsub_overflow",
	"tpc_vpu_abs_overflow",
	"tpc_vpu_fp_dst_nan_inf",
	"tpc_vpu_fp_dst_denorm",
	"tpc_assertions",
	"tpc_illegal_instruction",
	"tpc_pc_wrap_around",
	"tpc_qm_sw_err",
	"tpc_hbw_rresp_err",
	"tpc_hbw_bresp_err",
	"tpc_lbw_rresp_err",
	"tpc_lbw_bresp_err"
};
static const char * const
	gaudi_qman_error_cause[GAUDI_NUM_OF_QM_ERR_CAUSE] = {
	"PQ AXI HBW error",
	"CQ AXI HBW error",
	"CP AXI HBW error",
	"CP error due to undefined OPCODE",
	"CP encountered STOP OPCODE",
	"CP AXI LBW error",
	"CP WRREG32 or WRBULK returned error",
	"N/A",
	"FENCE 0 inc over max value and clipped",
	"FENCE 1 inc over max value and clipped",
	"FENCE 2 inc over max value and clipped",
	"FENCE 3 inc over max value and clipped",
	"FENCE 0 dec under min value and clipped",
	"FENCE 1 dec under min value and clipped",
	"FENCE 2 dec under min value and clipped",
	"FENCE 3 dec under min value and clipped"
};
static const char * const
	gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
	"Choice push while full error",
	"Choice Q watchdog error",
	"MSG AXI LBW returned with error"
};
static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_2 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_3 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_0 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_1 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_2 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_3 */
	QUEUE_TYPE_CPU, /* GAUDI_QUEUE_ID_CPU_PQ */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_3 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_0 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_1 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_2 */
	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_3 */
};
struct ecc_info_extract_params {
	u64 block_address;
	u32 num_memories;
	bool derr;
	bool disable_clock_gating;
};
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
				u64 phys_addr);
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
				struct hl_cs_job *job);
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
				u32 size, u64 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
				u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
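
/*
 * Note the host VA split set up below: the PMMU (4KB pages) and its
 * huge-page twin (2MB pages) cover the lower half of the host VA space,
 * while the DMMU takes the upper half with 2MB pages.
 */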
static int gaudi_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 num_sync_stream_queues = 0;
	int i;

	prop->max_queues = GAUDI_QUEUE_ID_SIZE;
	prop->hw_queues_props = kcalloc(prop->max_queues,
			sizeof(struct hw_queue_properties),
			GFP_KERNEL);

	if (!prop->hw_queues_props)
		return -ENOMEM;

	for (i = 0 ; i < prop->max_queues ; i++) {
		if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].requires_kernel_cb = 1;
			prop->hw_queues_props[i].supports_sync_stream = 1;
			num_sync_stream_queues++;
		} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
			prop->hw_queues_props[i].driver_only = 1;
			prop->hw_queues_props[i].requires_kernel_cb = 0;
			prop->hw_queues_props[i].supports_sync_stream = 0;
		} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].requires_kernel_cb = 0;
		} else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].requires_kernel_cb = 0;
			prop->hw_queues_props[i].supports_sync_stream = 0;
		}
	}

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
	prop->sync_stream_first_sob = 0;
	prop->sync_stream_first_mon = 0;
	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = GAUDI_HBM_SIZE_32GB;
	prop->dram_end_address = prop->dram_base_address +
					prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address +
					prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
					SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;

	prop->pmmu.hop0_shift = HOP0_SHIFT;
	prop->pmmu.hop1_shift = HOP1_SHIFT;
	prop->pmmu.hop2_shift = HOP2_SHIFT;
	prop->pmmu.hop3_shift = HOP3_SHIFT;
	prop->pmmu.hop4_shift = HOP4_SHIFT;
	prop->pmmu.hop0_mask = HOP0_MASK;
	prop->pmmu.hop1_mask = HOP1_MASK;
	prop->pmmu.hop2_mask = HOP2_MASK;
	prop->pmmu.hop3_mask = HOP3_MASK;
	prop->pmmu.hop4_mask = HOP4_MASK;
	prop->pmmu.start_addr = VA_HOST_SPACE_START;
	prop->pmmu.end_addr =
			(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
	prop->pmmu.page_size = PAGE_SIZE_4KB;
	prop->pmmu.num_hops = MMU_ARCH_5_HOPS;

	/* PMMU and HPMMU are the same except for page size */
	memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
	prop->pmmu_huge.page_size = PAGE_SIZE_2MB;

	/* shifts and masks are the same in PMMU and DMMU */
	memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu));
	prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
	prop->dmmu.end_addr = VA_HOST_SPACE_END;
	prop->dmmu.page_size = PAGE_SIZE_2MB;

	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GAUDI_EVENT_SIZE;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	prop->max_power_default = MAX_POWER_DEFAULT_PCI;

	prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;

	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;

	strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
					CARD_NAME_MAX_LEN);

	prop->max_pending_cs = GAUDI_MAX_PENDING_CS;

	prop->first_available_user_sob[HL_GAUDI_WS_DCORE] =
			num_sync_stream_queues * HL_RSVD_SOBS;
	prop->first_available_user_mon[HL_GAUDI_WS_DCORE] =
			num_sync_stream_queues * HL_RSVD_MONS;

	return 0;
}
static int gaudi_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[] = {"SRAM", "CFG", "HBM"};
	bool is_wc[3] = {false, false, true};
	int rc;

	rc = hl_pci_bars_map(hdev, name, is_wc);
	if (rc)
		return rc;

	hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] +
			(CFG_BASE - SPI_FLASH_BASE_ADDR);

	return 0;
}
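
/*
 * Move the HBM BAR window: re-point inbound PCI region 2 at @addr so
 * host accesses through the HBM BAR hit a different slice of device
 * memory. Returns the previous window base, or U64_MAX on failure.
 */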
static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct hl_inbound_pci_region pci_region;
	u64 old_addr = addr;
	int rc;

	if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr))
		return old_addr;

	/* Inbound Region 2 - Bar 4 - Point to HBM */
	pci_region.mode = PCI_BAR_MATCH_MODE;
	pci_region.bar = HBM_BAR_ID;
	pci_region.addr = addr;
	rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
	if (rc)
		return U64_MAX;

	if (gaudi) {
		old_addr = gaudi->hbm_bar_cur_addr;
		gaudi->hbm_bar_cur_addr = addr;
	}

	return old_addr;
}
static int gaudi_init_iatu(struct hl_device *hdev)
{
	struct hl_inbound_pci_region inbound_region;
	struct hl_outbound_pci_region outbound_region;
	int rc;

	/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = SRAM_BAR_ID;
	inbound_region.addr = SRAM_BASE_ADDR;
	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
	if (rc)
		goto done;

	/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = CFG_BAR_ID;
	inbound_region.addr = SPI_FLASH_BASE_ADDR;
	rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
	if (rc)
		goto done;

	/* Inbound Region 2 - Bar 4 - Point to HBM */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = HBM_BAR_ID;
	inbound_region.addr = DRAM_PHYS_BASE;
	rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
	if (rc)
		goto done;

	hdev->asic_funcs->set_dma_mask_from_fw(hdev);

	/* Outbound Region 0 - Point to Host */
	outbound_region.addr = HOST_PHYS_BASE;
	outbound_region.size = HOST_PHYS_SIZE;
	rc = hl_pci_set_outbound_region(hdev, &outbound_region);

done:
	return rc;
}
static int gaudi_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	rc = gaudi_get_fixed_properties(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get fixed properties\n");
		return rc;
	}

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_BAR_ID) != SRAM_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_BAR_ID),
			SRAM_BAR_SIZE);
		rc = -ENODEV;
		goto free_queue_props;
	}

	if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							CFG_BAR_ID),
			CFG_BAR_SIZE);
		rc = -ENODEV;
		goto free_queue_props;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);

	rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
			mmCPU_BOOT_ERR0, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
	if (rc)
		goto free_queue_props;

	/* GAUDI Firmware does not yet support security */
	prop->fw_security_disabled = true;
	dev_info(hdev->dev, "firmware-level security is disabled\n");

	return 0;

free_queue_props:
	kfree(hdev->asic_prop.hw_queues_props);
	return rc;
}

static int gaudi_early_fini(struct hl_device *hdev)
{
	kfree(hdev->asic_prop.hw_queues_props);
	hl_pci_fini(hdev);

	return 0;
}
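
/*
 * The derived clock below follows the CPU PLL:
 * pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)), and either
 * the reference clock or pll_clk may be divided further by
 * (div_fctr + 1), depending on the DIV_SEL mux value.
 */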
/**
 * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 trace_freq = 0;
	u32 pll_clk = 0;
	u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
	u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
	u32 nr = RREG32(mmPSOC_CPU_PLL_NR);
	u32 nf = RREG32(mmPSOC_CPU_PLL_NF);
	u32 od = RREG32(mmPSOC_CPU_PLL_OD);

	if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
		if (div_sel == DIV_SEL_REF_CLK)
			trace_freq = PLL_REF_CLK;
		else
			trace_freq = PLL_REF_CLK / (div_fctr + 1);
	} else if (div_sel == DIV_SEL_PLL_CLK ||
					div_sel == DIV_SEL_DIVIDED_PLL) {
		pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
		if (div_sel == DIV_SEL_PLL_CLK)
			trace_freq = pll_clk;
		else
			trace_freq = pll_clk / (div_fctr + 1);
	} else {
		dev_warn(hdev->dev,
			"Received invalid div select value: %d", div_sel);
	}

	prop->psoc_timestamp_frequency = trace_freq;
	prop->psoc_pci_pll_nr = nr;
	prop->psoc_pci_pll_nf = nf;
	prop->psoc_pci_pll_od = od;
	prop->psoc_pci_pll_div_factor = div_fctr;
}
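
/*
 * Build a kernel CB holding a single LIN_DMA packet that copies the TPC
 * kernel image from host memory to the user SRAM base, push it through
 * QMAN0 as a one-off job, then execute the kernel on every TPC engine.
 */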
static int _gaudi_init_tpc_mem(struct hl_device *hdev,
		dma_addr_t tpc_kernel_src_addr, u32 tpc_kernel_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct packet_lin_dma *init_tpc_mem_pkt;
	struct hl_cs_job *job;
	struct hl_cb *cb;
	u64 dst_addr;
	u32 cb_size, ctl;
	u8 tpc_id;
	int rc;

	cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
	if (!cb)
		return -EFAULT;

	init_tpc_mem_pkt = cb->kernel_address;
	cb_size = sizeof(*init_tpc_mem_pkt);
	memset(init_tpc_mem_pkt, 0, cb_size);

	init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);

	ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
	ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);

	init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
	dst_addr = (prop->sram_user_base_address &
			GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
			GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
	init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);

	job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	job->id = 0;
	job->user_cb = cb;
	job->user_cb->cs_cnt++;
	job->user_cb_size = cb_size;
	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);

	hl_debugfs_add_job(hdev, job);

	rc = gaudi_send_job_on_qman0(hdev, job);

	if (rc)
		goto free_job;

	for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
		rc = gaudi_run_tpc_kernel(hdev, dst_addr, tpc_id);
		if (rc)
			break;
	}

free_job:
	hl_userptr_delete_list(hdev, &job->userptr_list);
	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	cb->cs_cnt--;

release_cb:
	hl_cb_put(cb);
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	return rc;
}
/**
 * gaudi_init_tpc_mem() - Initialize TPC memories.
 * @hdev: Pointer to hl_device structure.
 *
 * Copy TPC kernel fw from firmware file and run it to initialize TPC memories.
 *
 * Return: 0 for success, negative value for error.
 */
static int gaudi_init_tpc_mem(struct hl_device *hdev)
{
	const struct firmware *fw;
	size_t fw_size;
	void *cpu_addr;
	dma_addr_t dma_handle;
	int rc, count = 5;

again:
	rc = reject_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
	if (rc == -EINTR && count-- > 0) {
		msleep(50);
		goto again;
	}

	if (rc) {
		dev_err(hdev->dev, "Failed to load firmware file %s\n",
				GAUDI_TPC_FW_FILE);
		goto out;
	}

	fw_size = fw->size;
	cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size,
			&dma_handle, GFP_KERNEL | __GFP_ZERO);
	if (!cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate %zu of dma memory for TPC kernel\n",
			fw_size);
		rc = -ENOMEM;
		goto out;
	}

	memcpy(cpu_addr, fw->data, fw_size);

	rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);

	hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr,
			dma_handle);

out:
	release_firmware(fw);
	return rc;
}
static int gaudi_late_init(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	rc = gaudi->cpucp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get cpucp info\n");
		return rc;
	}

	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);

	gaudi_fetch_psoc_frequency(hdev);

	rc = gaudi_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = gaudi_init_tpc_mem(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize TPC memories\n");
		goto disable_pci_access;
	}

	return 0;

disable_pci_access:
	hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}
static void gaudi_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}
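
/*
 * Allocate the host buffer that the device CPU can access. Up to
 * GAUDI_ALLOC_CPU_MEM_RETRY_CNT candidate buffers are allocated until
 * one lands entirely within a single MSB window (see the comment in the
 * function body); all rejected candidates are freed before returning.
 */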
static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
{
	dma_addr_t dma_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
	void *virt_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {};
	int i, j, rc = 0;

	/*
	 * The device CPU works with 40-bits addresses, while bit 39 must be set
	 * to '1' when accessing the host.
	 * Bits 49:39 of the full host address are saved for a later
	 * configuration of the HW to perform extension to 50 bits.
	 * Because there is a single HW register that holds the extension bits,
	 * these bits must be identical across the entire allocated range.
	 */

	for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
		virt_addr_arr[i] =
			hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_CPU_ACCESSIBLE_MEM_SIZE,
						&dma_addr_arr[i],
						GFP_KERNEL | __GFP_ZERO);
		if (!virt_addr_arr[i]) {
			rc = -ENOMEM;
			goto free_dma_mem_arr;
		}

		end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
		if (GAUDI_CPU_PCI_MSB_ADDR(dma_addr_arr[i]) ==
				GAUDI_CPU_PCI_MSB_ADDR(end_addr))
			break;
	}

	if (i == GAUDI_ALLOC_CPU_MEM_RETRY_CNT) {
		dev_err(hdev->dev,
			"MSB of CPU accessible DMA memory are not identical in all range\n");
		rc = -EFAULT;
		goto free_dma_mem_arr;
	}

	hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
	hdev->cpu_accessible_dma_address = dma_addr_arr[i];
	hdev->cpu_pci_msb_addr =
		GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);

	GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);

free_dma_mem_arr:
	for (j = 0 ; j < i ; j++)
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
						HL_CPU_ACCESSIBLE_MEM_SIZE,
						virt_addr_arr[j],
						dma_addr_arr[j]);

	return rc;
}
static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u32 i;

	for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
		q = &gaudi->internal_qmans[i];
		if (!q->pq_kernel_addr)
			continue;
		hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
							q->pq_kernel_addr,
							q->pq_dma_addr);
	}
}

static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	int rc, i;

	for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
		if (gaudi_queue_type[i] != QUEUE_TYPE_INT)
			continue;

		q = &gaudi->internal_qmans[i];

		switch (i) {
		case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3:
		case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3:
			q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
			break;
		case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
			q->pq_size = MME_QMAN_SIZE_IN_BYTES;
			break;
		case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3:
			q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
			break;
		default:
			dev_err(hdev->dev, "Bad internal queue index %d", i);
			rc = -EINVAL;
			goto free_internal_qmans_pq_mem;
		}

		q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
						hdev, q->pq_size,
						&q->pq_dma_addr,
						GFP_KERNEL | __GFP_ZERO);
		if (!q->pq_kernel_addr) {
			rc = -ENOMEM;
			goto free_internal_qmans_pq_mem;
		}
	}

	return 0;

free_internal_qmans_pq_mem:
	gaudi_free_internal_qmans_pq_mem(hdev);
	return rc;
}
static int gaudi_sw_init(struct hl_device *hdev)
{
	struct gaudi_device *gaudi;
	u32 i, event_id = 0;
	int rc;

	/* Allocate device structure */
	gaudi = kzalloc(sizeof(*gaudi), GFP_KERNEL);
	if (!gaudi)
		return -ENOMEM;

	for (i = 0 ; i < ARRAY_SIZE(gaudi_irq_map_table) ; i++) {
		if (gaudi_irq_map_table[i].valid) {
			if (event_id == GAUDI_EVENT_SIZE) {
				dev_err(hdev->dev,
					"Event array exceeds the limit of %u events\n",
					GAUDI_EVENT_SIZE);
				rc = -EINVAL;
				goto free_gaudi_device;
			}

			gaudi->events[event_id++] =
					gaudi_irq_map_table[i].fc_id;
		}
	}

	gaudi->cpucp_info_get = gaudi_cpucp_info_get;

	gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ;

	hdev->asic_specific = gaudi;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_gaudi_device;
	}

	rc = gaudi_alloc_cpu_accessible_dma_mem(hdev);
	if (rc)
		goto free_dma_pool;

	hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_accessible_dma_pool;
	}

	rc = gaudi_alloc_internal_qmans_pq_mem(hdev);
	if (rc)
		goto free_cpu_accessible_dma_pool;

	spin_lock_init(&gaudi->hw_queues_lock);
	mutex_init(&gaudi->clk_gate_mutex);

	hdev->supports_sync_stream = true;
	hdev->supports_coresight = true;

	return 0;

free_cpu_accessible_dma_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
	GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
				hdev->cpu_pci_msb_addr);
	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_gaudi_device:
	kfree(gaudi);
	return rc;
}
static int gaudi_sw_fini(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	gaudi_free_internal_qmans_pq_mem(hdev);

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
					hdev->cpu_pci_msb_addr);
	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	mutex_destroy(&gaudi->clk_gate_mutex);

	kfree(gaudi);

	return 0;
}
static irqreturn_t gaudi_irq_handler_single(int irq, void *arg)
{
	struct hl_device *hdev = arg;
	int i;

	if (hdev->disabled)
		return IRQ_HANDLED;

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_irq_handler_cq(irq, &hdev->completion_queue[i]);

	hl_irq_handler_eq(irq, &hdev->event_queue);

	return IRQ_HANDLED;
}
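
/*
 * MSI vector layout: completion queues map 1:1 onto the low vector
 * indices, the CPU event queue owns vector GAUDI_EVENT_QUEUE_MSI_IDX,
 * and any higher index is shifted up past the (currently unused) NIC
 * vectors.
 */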
/*
 * For backward compatibility, new MSI interrupts should be set after the
 * existing CPU and NIC interrupts.
 */
static int gaudi_pci_irq_vector(struct hl_device *hdev, unsigned int nr,
				bool cpu_eq)
{
	int msi_vec;

	if ((nr != GAUDI_EVENT_QUEUE_MSI_IDX) && (cpu_eq))
		dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n",
				GAUDI_EVENT_QUEUE_MSI_IDX);

	msi_vec = ((nr < GAUDI_EVENT_QUEUE_MSI_IDX) || (cpu_eq)) ? nr :
			(nr + NIC_NUMBER_OF_ENGINES + 1);

	return pci_irq_vector(hdev->pdev, msi_vec);
}
static int gaudi_enable_msi_single(struct hl_device *hdev)
{
	int rc, irq;

	dev_info(hdev->dev, "Working in single MSI IRQ mode\n");

	irq = gaudi_pci_irq_vector(hdev, 0, false);
	rc = request_irq(irq, gaudi_irq_handler_single, 0,
			"gaudi single msi", hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to request single MSI IRQ\n");

	return rc;
}
static int gaudi_enable_msi_multi(struct hl_device *hdev)
{
	int cq_cnt = hdev->asic_prop.completion_queues_count;
	int rc, i, irq_cnt_init, irq;

	for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
		irq = gaudi_pci_irq_vector(hdev, i, false);
		rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i],
				&hdev->completion_queue[i]);
		if (rc) {
			dev_err(hdev->dev, "Failed to request IRQ %d", irq);
			goto free_irqs;
		}
	}

	irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true);
	rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt],
			&hdev->event_queue);
	if (rc) {
		dev_err(hdev->dev, "Failed to request IRQ %d", irq);
		goto free_irqs;
	}

	return 0;

free_irqs:
	for (i = 0 ; i < irq_cnt_init ; i++)
		free_irq(gaudi_pci_irq_vector(hdev, i, false),
				&hdev->completion_queue[i]);
	return rc;
}
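
/*
 * Prefer one MSI vector per interrupt source; if the PCI core grants
 * fewer than NUMBER_OF_INTERRUPTS vectors, fall back to a single vector
 * that fans out to all completion queues and the event queue.
 */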
static int gaudi_enable_msi(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	if (gaudi->hw_cap_initialized & HW_CAP_MSI)
		return 0;

	rc = pci_alloc_irq_vectors(hdev->pdev, 1, GAUDI_MSI_ENTRIES,
					PCI_IRQ_MSI);
	if (rc < 0) {
		dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
		return rc;
	}

	if (rc < NUMBER_OF_INTERRUPTS) {
		gaudi->multi_msi_mode = false;
		rc = gaudi_enable_msi_single(hdev);
	} else {
		gaudi->multi_msi_mode = true;
		rc = gaudi_enable_msi_multi(hdev);
	}

	if (rc)
		goto free_pci_irq_vectors;

	gaudi->hw_cap_initialized |= HW_CAP_MSI;

	return 0;

free_pci_irq_vectors:
	pci_free_irq_vectors(hdev->pdev);
	return rc;
}
static void gaudi_sync_irqs(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int i, cq_cnt = hdev->asic_prop.completion_queues_count;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
		return;

	/* Wait for all pending IRQs to be finished */
	if (gaudi->multi_msi_mode) {
		for (i = 0 ; i < cq_cnt ; i++)
			synchronize_irq(gaudi_pci_irq_vector(hdev, i, false));

		synchronize_irq(gaudi_pci_irq_vector(hdev,
						GAUDI_EVENT_QUEUE_MSI_IDX,
						true));
	} else {
		synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
	}
}
static void gaudi_disable_msi(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
		return;

	gaudi_sync_irqs(hdev);

	if (gaudi->multi_msi_mode) {
		irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX,
						true);
		free_irq(irq, &hdev->event_queue);

		for (i = 0 ; i < cq_cnt ; i++) {
			irq = gaudi_pci_irq_vector(hdev, i, false);
			free_irq(irq, &hdev->completion_queue[i]);
		}
	} else {
		free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
	}

	pci_free_irq_vectors(hdev->pdev);

	gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
}
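
/*
 * Enable SRAM address scrambling in all sixteen NIF/SIF routers and all
 * eight DMA_IF downstream channels. Done once per reset cycle, tracked
 * by the HW_CAP_SRAM_SCRAMBLER bit.
 */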
static void gaudi_init_scrambler_sram(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
		return;

	if (!hdev->sram_scrambler_enable)
		return;

	WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);

	gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER;
}
static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
		return;

	if (!hdev->dram_scrambler_enable)
		return;

	WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_2_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_3_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_4_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_5_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_6_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_7_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_0_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_1_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_2_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_3_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_4_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_5_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_6_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_7_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);

	gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER;
}
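
/*
 * Program per-router end-to-end (E2E) credits. The WR/RD values are
 * buffer sizes; the HBM entries appear to be encoded in 8-byte units,
 * hence the ">> 3". When HBM scrambling is disabled, the NL_HBM_*
 * address-decoding registers are reprogrammed as well before the E2E
 * paths are enabled.
 */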
static void gaudi_init_e2e(struct hl_device *hdev)
{
	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3);
	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49);
	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 101);

	WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);

	WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
	WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
	WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);

	WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 297 >> 3);
	WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 908 >> 3);
	WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 19);
	WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 19);

	WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 318 >> 3);
	WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 956 >> 3);
	WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 79);
	WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 163);

	WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);

	WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
	WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
	WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);

	WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 318 >> 3);
	WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 956 >> 3);
	WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 79);
	WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 79);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	if (!hdev->dram_scrambler_enable) {
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
	}

	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
}
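
/*
 * Set HBM read/write credit counts on all four DMA_IF instances and
 * enable read/write credit arbitration for both HBM channels of each.
 */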
static void gaudi_init_hbm_cred(struct hl_device *hdev)
{
	uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;

	hbm0_wr = 0x33333333;
	hbm0_rd = 0x77777777;
	hbm1_wr = 0x55555555;
	hbm1_rd = 0xDDDDDDDD;

	WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_E_N_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_E_N_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_E_N_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_E_S_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_E_S_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_E_S_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_E_S_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_W_N_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_W_N_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_W_N_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_W_N_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_W_S_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_W_S_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_W_S_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_W_S_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_E_N_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_E_S_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_N_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_S_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));

	WREG32(mmDMA_IF_E_N_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_E_S_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_N_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_S_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
}
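
/*
 * One-time "golden" register setup: E2E credits, HBM credits, TPC
 * arithmetic-interrupt masking and icache fetch depth, plus zeroing the
 * first 128 bytes of SRAM for the tensor-DMA workaround noted above.
 */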
static void gaudi_init_golden_registers(struct hl_device *hdev)
{
	u32 tpc_offset;
	int tpc_id, i;

	gaudi_init_e2e(hdev);

	gaudi_init_hbm_cred(hdev);

	hdev->asic_funcs->disable_clock_gating(hdev);

	for (tpc_id = 0, tpc_offset = 0;
			tpc_id < TPC_NUMBER_OF_ENGINES;
			tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
		/* Mask all arithmetic interrupts from TPC */
		WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF);
		/* Set 16 cache lines */
		WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
				ICACHE_FETCH_LINE_NUM, 2);
	}

	/* Make sure 1st 128 bytes in SRAM are 0 for Tensor DMA */
	for (i = 0 ; i < 128 ; i += 8)
		writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i);

	WREG32(mmMME0_CTRL_EUS_ROLLUP_CNT_ADD, 3);
	WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
	WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
	WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
}
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
				int qman_id, dma_addr_t qman_pq_addr)
{
	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
	u32 q_off, dma_qm_offset;
	u32 dma_qm_err_cfg;

	dma_qm_offset = dma_id * DMA_QMAN_OFFSET;

	mtr_base_en_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_en_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_en_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_en_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_ws_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_ws_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = dma_qm_offset + qman_id * 4;

	WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_pq_addr));
	WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_pq_addr));

	WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
	WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);

	WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, QMAN_LDMA_SIZE_OFFSET);
	WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
							QMAN_LDMA_SRC_OFFSET);
	WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
							QMAN_LDMA_DST_OFFSET);

	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);

	WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);

	/* The following configuration is needed only once per QMAN */
	if (qman_id == 0) {
		/* Configure RAZWI IRQ */
		dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err) {
			dma_qm_err_cfg |=
				PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
		}

		WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
			lower_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
			upper_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
			gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
									dma_id);

		WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
				QMAN_EXTERNAL_MAKE_TRUSTED);

		WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
	}
}

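/*
 * A rough summary of the per-stream setup above, as the code reads: the
 * PQ base/size/PI/CI registers point the QMAN at its queue (the size is
 * log2-encoded via ilog2()), the LDMA offsets tell the CP where to fetch
 * descriptors from, and the four MSG_BASE pairs give the CP the
 * sync-manager monitor/SOB addresses it is allowed to write. Everything
 * under the qman_id == 0 check is global to the QMAN, so it is programmed
 * once per engine rather than once per stream.
 */
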
static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
{
	u32 dma_offset = dma_id * DMA_CORE_OFFSET;
	u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT;

	/* Set to maximum possible according to physical size */
	WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
	WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0);

	/* WA for H/W bug H3-2116 */
	WREG32(mmDMA0_CORE_LBW_MAX_OUTSTAND + dma_offset, 15);

	/* STOP_ON bit implies no completion to operation in case of RAZWI */
	if (hdev->stop_on_err)
		dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;

	WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg);
	WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset,
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
	WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset,
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
	WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset,
		gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id);
	WREG32(mmDMA0_CORE_PROT + dma_offset,
			1 << DMA0_CORE_PROT_ERR_VAL_SHIFT);
	/* If the channel is secured, it should be in MMU bypass mode */
	WREG32(mmDMA0_CORE_SECURE_PROPS + dma_offset,
			1 << DMA0_CORE_SECURE_PROPS_MMBP_SHIFT);
	WREG32(mmDMA0_CORE_CFG_0 + dma_offset, 1 << DMA0_CORE_CFG_0_EN_SHIFT);
}

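/*
 * The CFG_0 enable bit is the last write in this function, presumably so
 * that error reporting, protection and MMU-bypass configuration are all in
 * place before the DMA core starts accepting transactions.
 */
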
static void gaudi_enable_qman(struct hl_device *hdev, int dma_id,
				u32 enable_mask)
{
	u32 dma_qm_offset = dma_id * DMA_QMAN_OFFSET;

	WREG32(mmDMA0_QM_GLBL_CFG0 + dma_qm_offset, enable_mask);
}

static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct hl_hw_queue *q;
	int i, j, dma_id, cpu_skip, nic_skip, cq_id = 0, q_idx, msi_vec = 0;

	if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)
		return;

	for (i = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
		dma_id = gaudi_dma_assignment[i];
		/*
		 * For queues after the CPU Q need to add 1 to get the correct
		 * queue. In addition, need to add the CPU EQ and NIC IRQs in
		 * order to get the correct MSI register.
		 */
		if (dma_id > 1) {
			cpu_skip = 1;
			nic_skip = NIC_NUMBER_OF_ENGINES;
		} else {
			cpu_skip = 0;
			nic_skip = 0;
		}

		for (j = 0 ; j < QMAN_STREAMS ; j++) {
			q_idx = 4 * dma_id + j + cpu_skip;
			q = &hdev->kernel_queues[q_idx];
			q->cq_id = cq_id++;
			q->msi_vec = nic_skip + cpu_skip + msi_vec++;
			gaudi_init_pci_dma_qman(hdev, dma_id, j,
						q->bus_address);
		}

		gaudi_init_dma_core(hdev, dma_id);

		gaudi_enable_qman(hdev, dma_id, PCI_DMA_QMAN_ENABLE);
	}

	gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA;
}

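/*
 * Example of the index math above: stream 2 of a channel assigned to
 * engine 0 lands on kernel queue 4 * 0 + 2 + 0 = 2, while stream 0 of a
 * channel assigned to engine 5 lands on queue 4 * 5 + 0 + 1 = 21, because
 * every queue past the CPU queue is shifted by one slot and its MSI
 * vector is additionally shifted past the CPU EQ and NIC vectors.
 */
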
static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
					int qman_id, u64 qman_base_addr)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 q_off, dma_qm_offset;
	u32 dma_qm_err_cfg;

	dma_qm_offset = dma_id * DMA_QMAN_OFFSET;

	mtr_base_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = dma_qm_offset + qman_id * 4;

	if (qman_id < 4) {
		WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off,
					lower_32_bits(qman_base_addr));
		WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off,
					upper_32_bits(qman_base_addr));

		WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HBM_DMA_QMAN_LENGTH));
		WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
		WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);

		WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
						QMAN_CPDMA_SIZE_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
						QMAN_CPDMA_SRC_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
						QMAN_CPDMA_DST_OFFSET);
	} else {
		WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
						QMAN_LDMA_SIZE_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
						QMAN_LDMA_SRC_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
						QMAN_LDMA_DST_OFFSET);

		/* Configure RAZWI IRQ */
		dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err) {
			dma_qm_err_cfg |=
				HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
		}
		WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);

		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
			lower_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
			upper_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
			gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
									dma_id);

		WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
		WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}

	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
}

static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	int i, j, dma_id, internal_q_index;

	if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)
		return;

	for (i = 0 ; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1 + i];

		for (j = 0 ; j < QMAN_STREAMS ; j++) {
			/*
			 * Add the CPU queue in order to get the correct queue
			 * number as all internal queue are placed after it
			 */
			internal_q_index = dma_id * QMAN_STREAMS + j + 1;

			q = &gaudi->internal_qmans[internal_q_index];
			qman_base_addr = (u64) q->pq_dma_addr;
			gaudi_init_hbm_dma_qman(hdev, dma_id, j,
						qman_base_addr);
		}

		/* Initializing lower CP for HBM DMA QMAN */
		gaudi_init_hbm_dma_qman(hdev, dma_id, 4, 0);

		gaudi_init_dma_core(hdev, dma_id);

		gaudi_enable_qman(hdev, dma_id, HBM_DMA_QMAN_ENABLE);
	}

	gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA;
}

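/*
 * qman_id 4 is the lower CP of each HBM DMA QMAN; it has no PQ of its own,
 * which is why it is initialized with a base address of 0 and, in
 * gaudi_init_hbm_dma_qman() above, takes the QMAN_LDMA_* offsets instead
 * of the QMAN_CPDMA_* ones used by the four upper streams.
 */
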
static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
				int qman_id, u64 qman_base_addr)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 q_off, mme_id;
	u32 mme_qm_err_cfg;

	mtr_base_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = mme_offset + qman_id * 4;

	if (qman_id < 4) {
		WREG32(mmMME0_QM_PQ_BASE_LO_0 + q_off,
					lower_32_bits(qman_base_addr));
		WREG32(mmMME0_QM_PQ_BASE_HI_0 + q_off,
					upper_32_bits(qman_base_addr));

		WREG32(mmMME0_QM_PQ_SIZE_0 + q_off, ilog2(MME_QMAN_LENGTH));
		WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0);
		WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0);

		WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
						QMAN_CPDMA_SIZE_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
						QMAN_CPDMA_SRC_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
						QMAN_CPDMA_DST_OFFSET);
	} else {
		WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
						QMAN_LDMA_SIZE_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
						QMAN_LDMA_SRC_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
						QMAN_LDMA_DST_OFFSET);

		/* Configure RAZWI IRQ */
		mme_id = mme_offset /
				(mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2;

		mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err) {
			mme_qm_err_cfg |=
				MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
		}
		WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg);
		WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset,
			lower_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset,
			upper_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset,
			gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id +
									mme_id);

		WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
		WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}

	WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
	WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
	WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
	WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
}

static void gaudi_init_mme_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	u32 mme_offset;
	int i, internal_q_index;

	if (gaudi->hw_cap_initialized & HW_CAP_MME)
		return;

	/*
	 * map GAUDI_QUEUE_ID_MME_0_X to the N_W_MME (mmMME2_QM_BASE)
	 * and GAUDI_QUEUE_ID_MME_1_X to the S_W_MME (mmMME0_QM_BASE)
	 */

	mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;

	for (i = 0 ; i < MME_NUMBER_OF_QMANS ; i++) {
		internal_q_index = GAUDI_QUEUE_ID_MME_0_0 + i;
		q = &gaudi->internal_qmans[internal_q_index];
		qman_base_addr = (u64) q->pq_dma_addr;
		gaudi_init_mme_qman(hdev, mme_offset, (i & 0x3),
					qman_base_addr);
		if (i == 3)
			mme_offset = 0;
	}

	/* Initializing lower CP for MME QMANs */
	mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
	gaudi_init_mme_qman(hdev, mme_offset, 4, 0);
	gaudi_init_mme_qman(hdev, 0, 4, 0);

	WREG32(mmMME2_QM_GLBL_CFG0, QMAN_MME_ENABLE);
	WREG32(mmMME0_QM_GLBL_CFG0, QMAN_MME_ENABLE);

	gaudi->hw_cap_initialized |= HW_CAP_MME;
}

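/*
 * (i & 0x3) folds the eight MME internal queues into the four streams of
 * each QMAN: the first four go to the N_W block at the mmMME2 offset, and
 * once i reaches 3 the offset is switched to 0 so the next four program
 * the S_W block at mmMME0, matching the mapping comment above.
 */
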
static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
				int qman_id, u64 qman_base_addr)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 q_off, tpc_id;
	u32 tpc_qm_err_cfg;

	mtr_base_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = tpc_offset + qman_id * 4;

	if (qman_id < 4) {
		WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off,
					lower_32_bits(qman_base_addr));
		WREG32(mmTPC0_QM_PQ_BASE_HI_0 + q_off,
					upper_32_bits(qman_base_addr));

		WREG32(mmTPC0_QM_PQ_SIZE_0 + q_off, ilog2(TPC_QMAN_LENGTH));
		WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0);
		WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0);

		WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
						QMAN_CPDMA_SIZE_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
						QMAN_CPDMA_SRC_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
						QMAN_CPDMA_DST_OFFSET);
	} else {
		WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
						QMAN_LDMA_SIZE_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
						QMAN_LDMA_SRC_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
						QMAN_LDMA_DST_OFFSET);

		/* Configure RAZWI IRQ */
		tpc_id = tpc_offset /
				(mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);

		tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err) {
			tpc_qm_err_cfg |=
				TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
		}

		WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg);
		WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset,
			lower_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset,
			upper_32_bits(CFG_BASE +
					mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
		WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset,
			gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id +
									tpc_id);

		WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
		WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}

	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
}

static void gaudi_init_tpc_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	u32 so_base_hi, tpc_offset = 0;
	u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH -
			mmTPC0_CFG_SM_BASE_ADDRESS_HIGH;
	int i, tpc_id, internal_q_index;

	if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)
		return;

	so_base_hi = upper_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);

	for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
		for (i = 0 ; i < QMAN_STREAMS ; i++) {
			internal_q_index = GAUDI_QUEUE_ID_TPC_0_0 +
						tpc_id * QMAN_STREAMS + i;
			q = &gaudi->internal_qmans[internal_q_index];
			qman_base_addr = (u64) q->pq_dma_addr;
			gaudi_init_tpc_qman(hdev, tpc_offset, i,
						qman_base_addr);

			if (i == 3) {
				/* Initializing lower CP for TPC QMAN */
				gaudi_init_tpc_qman(hdev, tpc_offset, 4, 0);

				/* Enable the QMAN and TPC channel */
				WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset,
						QMAN_TPC_ENABLE);
			}
		}

		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + tpc_id * tpc_delta,
				so_base_hi);

		tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;

		gaudi->hw_cap_initialized |=
				FIELD_PREP(HW_CAP_TPC_MASK, 1 << tpc_id);
	}
}

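/*
 * HW_CAP_TPC_MASK is a bit-field within hw_cap_initialized; FIELD_PREP
 * shifts the per-engine bit (1 << tpc_id) into that field, so each TPC is
 * marked initialized individually rather than all-or-nothing.
 */
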
static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
		return;

	WREG32(mmDMA0_QM_GLBL_CFG0, 0);
	WREG32(mmDMA1_QM_GLBL_CFG0, 0);
	WREG32(mmDMA5_QM_GLBL_CFG0, 0);
}

static void gaudi_disable_hbm_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
		return;

	WREG32(mmDMA2_QM_GLBL_CFG0, 0);
	WREG32(mmDMA3_QM_GLBL_CFG0, 0);
	WREG32(mmDMA4_QM_GLBL_CFG0, 0);
	WREG32(mmDMA6_QM_GLBL_CFG0, 0);
	WREG32(mmDMA7_QM_GLBL_CFG0, 0);
}

static void gaudi_disable_mme_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
		return;

	WREG32(mmMME2_QM_GLBL_CFG0, 0);
	WREG32(mmMME0_QM_GLBL_CFG0, 0);
}

static void gaudi_disable_tpc_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 tpc_offset = 0;
	int tpc_id;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
		return;

	for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
		WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, 0);
		tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
	}
}

static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
		return;

	/* Stop upper CPs of QMANs 0.0 to 1.3 and 5.0 to 5.3 */
	WREG32(mmDMA0_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA1_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA5_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_hbm_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
		return;

	/* Stop CPs of HBM DMA QMANs */

	WREG32(mmDMA2_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA3_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA4_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA6_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA7_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_mme_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
		return;

	/* Stop CPs of MME QMANs */
	WREG32(mmMME2_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmMME0_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_tpc_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
		return;

	WREG32(mmTPC0_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC1_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC2_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC3_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC4_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC5_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC6_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_pci_dma_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
		return;

	WREG32(mmDMA0_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA1_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA5_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
}

static void gaudi_hbm_dma_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
		return;

	WREG32(mmDMA2_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA3_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA4_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA6_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA7_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
}

static void gaudi_mme_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
		return;

	/* WA for H/W bug H3-1800 bug: do ACC and SBAB writes twice */
	WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
}

static void gaudi_tpc_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
		return;

	WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}

static void gaudi_set_clock_gating(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 qman_offset;
	bool enable;
	int i;

	/* In case we are during debug session, don't enable the clock gate
	 * as it may interfere
	 */
	if (hdev->in_debug)
		return;

	for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
		enable = !!(hdev->clock_gating_mask &
				(BIT_ULL(gaudi_dma_assignment[i])));

		qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
		WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
				enable ? QMAN_CGM1_PWR_GATE_EN : 0);
		WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
				enable ? QMAN_UPPER_CP_CGM_PWR_GATE_EN : 0);
	}

	for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
		enable = !!(hdev->clock_gating_mask &
				(BIT_ULL(gaudi_dma_assignment[i])));

		qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
		WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
				enable ? QMAN_CGM1_PWR_GATE_EN : 0);
		WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
				enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
	}

	enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0)));
	WREG32(mmMME0_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
	WREG32(mmMME0_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);

	enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2)));
	WREG32(mmMME2_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
	WREG32(mmMME2_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);

	for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
		enable = !!(hdev->clock_gating_mask &
				(BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)));

		WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
				enable ? QMAN_CGM1_PWR_GATE_EN : 0);
		WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
				enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);

		qman_offset += TPC_QMAN_OFFSET;
	}

	gaudi->hw_cap_initialized |= HW_CAP_CLK_GATE;
}

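/*
 * hdev->clock_gating_mask holds one bit per engine ID, so clock gating can
 * be enabled selectively: BIT_ULL(engine_id) is tested for every DMA, MME
 * and TPC engine before its CGM registers are written.
 */
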
static void gaudi_disable_clock_gating(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 qman_offset;
	int i;

	if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
		return;

	for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
		WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 0);
		WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, 0);

		qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG);
	}

	WREG32(mmMME0_QM_CGM_CFG, 0);
	WREG32(mmMME0_QM_CGM_CFG1, 0);
	WREG32(mmMME2_QM_CGM_CFG, 0);
	WREG32(mmMME2_QM_CGM_CFG1, 0);

	for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
		WREG32(mmTPC0_QM_CGM_CFG + qman_offset, 0);
		WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 0);

		qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG);
	}

	gaudi->hw_cap_initialized &= ~(HW_CAP_CLK_GATE);
}

static void gaudi_enable_timestamp(struct hl_device *hdev)
{
	/* Disable the timestamp counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);

	/* Zero the lower/upper parts of the 64-bit counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);

	/* Enable the counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
}

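/*
 * The counter is stopped before its two 32-bit halves are cleared, so the
 * final enable write starts it from a clean zero; the 0x8/0xC offsets are
 * taken here to be the low/high words of the 64-bit counter, per the
 * comment above.
 */
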
static void gaudi_disable_timestamp(struct hl_device *hdev)
{
	/* Disable the timestamp counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}

static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
{
	u32 wait_timeout_ms;

	dev_info(hdev->dev,
		"Halting compute engines and disabling interrupts\n");

	if (hdev->pldm)
		wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
	else
		wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;


	gaudi_stop_mme_qmans(hdev);
	gaudi_stop_tpc_qmans(hdev);
	gaudi_stop_hbm_dma_qmans(hdev);
	gaudi_stop_pci_dma_qmans(hdev);

	hdev->asic_funcs->disable_clock_gating(hdev);

	msleep(wait_timeout_ms);

	gaudi_pci_dma_stall(hdev);
	gaudi_hbm_dma_stall(hdev);
	gaudi_tpc_stall(hdev);
	gaudi_mme_stall(hdev);

	msleep(wait_timeout_ms);

	gaudi_disable_mme_qmans(hdev);
	gaudi_disable_tpc_qmans(hdev);
	gaudi_disable_hbm_dma_qmans(hdev);
	gaudi_disable_pci_dma_qmans(hdev);

	gaudi_disable_timestamp(hdev);

	gaudi_disable_msi(hdev);
}

static int gaudi_mmu_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;
	u64 hop0_addr;
	int rc, i;

	if (!hdev->mmu_enable)
		return 0;

	if (gaudi->hw_cap_initialized & HW_CAP_MMU)
		return 0;

	hdev->dram_supports_virtual_memory = false;

	for (i = 0 ; i < prop->max_asid ; i++) {
		hop0_addr = prop->mmu_pgt_addr +
				(i * prop->mmu_hop_table_size);

		rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
		if (rc) {
			dev_err(hdev->dev,
				"failed to set hop0 addr for asid %d\n", i);
			goto err;
		}
	}

	/* init MMU cache manage page */
	WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
	WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);

	WREG32(mmMMU_UP_MMU_ENABLE, 1);
	WREG32(mmMMU_UP_SPI_MASK, 0xF);

	WREG32(mmSTLB_HOP_CONFIGURATION,
			hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);

	/*
	 * The H/W expects the first PI after init to be 1. After wraparound
	 * we'll write 0.
	 */
	gaudi->mmu_cache_inv_pi = 1;

	gaudi->hw_cap_initialized |= HW_CAP_MMU;

	return 0;

err:
	return rc;
}

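/*
 * Hop-0 tables are laid out back-to-back starting at mmu_pgt_addr, one per
 * ASID. For example, with a (hypothetical) hop table size of 4KB, ASID 7
 * would get its hop-0 table at mmu_pgt_addr + 7 * 4KB.
 */
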
static int gaudi_load_firmware_to_device(struct hl_device *hdev)
{
	void __iomem *dst;

	/* HBM scrambler must be initialized before pushing F/W to HBM */
	gaudi_init_scrambler_hbm(hdev);

	dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;

	return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst);
}

static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
{
	void __iomem *dst;

	dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;

	return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst);
}

static void gaudi_read_device_fw_version(struct hl_device *hdev,
					enum hl_fw_component fwc)
{
	const char *name;
	u32 ver_off;
	char *dest;

	switch (fwc) {
	case FW_COMP_UBOOT:
		ver_off = RREG32(mmUBOOT_VER_OFFSET);
		dest = hdev->asic_prop.uboot_ver;
		name = "U-Boot";
		break;
	case FW_COMP_PREBOOT:
		ver_off = RREG32(mmPREBOOT_VER_OFFSET);
		dest = hdev->asic_prop.preboot_ver;
		name = "Preboot";
		break;
	default:
		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
		return;
	}

	ver_off &= ~((u32)SRAM_BASE_ADDR);

	if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
		memcpy_fromio(dest, hdev->pcie_bar[SRAM_BAR_ID] + ver_off,
							VERSION_MAX_LEN);
	} else {
		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
								name, ver_off);
		strcpy(dest, "unavailable");
	}
}

static int gaudi_init_cpu(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	if (!hdev->cpu_enable)
		return 0;

	if (gaudi->hw_cap_initialized & HW_CAP_CPU)
		return 0;

	/*
	 * The device CPU works with 40 bits addresses.
	 * This register sets the extension to 50 bits.
	 */
	WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);

	rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
			mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU,
			mmCPU_CMD_STATUS_TO_HOST,
			mmCPU_BOOT_ERR0,
			!hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC,
			GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);

	if (rc)
		return rc;

	gaudi->hw_cap_initialized |= HW_CAP_CPU;

	return 0;
}

static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct hl_eq *eq;
	u32 status;
	struct hl_hw_queue *cpu_pq =
			&hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
	WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));

	WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
	WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));

	WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW,
			lower_32_bits(hdev->cpu_accessible_dma_address));
	WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH,
			upper_32_bits(hdev->cpu_accessible_dma_address));

	WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmCPU_IF_EQ_RD_OFFS, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	if (gaudi->multi_msi_mode)
		WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
	else
		WREG32(mmCPU_IF_QUEUE_INIT,
			PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmCPU_IF_QUEUE_INIT,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		cpu_timeout);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with Device CPU (CPU-CP timeout)\n");
		return -EIO;
	}

	gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}

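/*
 * The handshake above, as the code reads: the driver publishes the PQ, EQ
 * and CQ ring addresses and lengths, reports its own readiness through
 * mmCPU_IF_QUEUE_INIT (choosing the single-MSI variant when multi-MSI
 * mode is off), pokes the device CPU through the GIC, and then polls the
 * same register until the CPU flips it to PQ_INIT_STATUS_READY_FOR_HOST.
 */
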
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
	/* Perform read from the device to make sure device is up */
	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);

	/* Set the access through PCI bars (Linux driver only) as
	 * secured
	 */
	WREG32(mmPCIE_WRAP_LBW_PROT_OVR,
			(PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
			PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));

	/* Perform read to flush the waiting writes to ensure
	 * configuration was set in the device
	 */
	RREG32(mmPCIE_WRAP_LBW_PROT_OVR);

	/*
	 * Let's mark in the H/W that we have reached this point. We check
	 * this value in the reset_before_init function to understand whether
	 * we need to reset the chip before doing H/W init. This register is
	 * cleared by the H/W upon H/W reset
	 */
	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);

	/* Configure the reset registers. Must be done as early as possible
	 * in case we fail during H/W initialization
	 */
	WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
			(CFG_RST_H_DMA_MASK |
			CFG_RST_H_MME_MASK |
			CFG_RST_H_SM_MASK |
			CFG_RST_H_TPC_7_MASK));

	WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);

	WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
			(CFG_RST_H_HBM_MASK |
			CFG_RST_H_TPC_7_MASK |
			CFG_RST_H_NIC_MASK |
			CFG_RST_H_SM_MASK |
			CFG_RST_H_DMA_MASK |
			CFG_RST_H_MME_MASK |
			CFG_RST_H_CPU_MASK |
			CFG_RST_H_MMU_MASK));

	WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
			(CFG_RST_L_IF_MASK |
			CFG_RST_L_PSOC_MASK |
			CFG_RST_L_TPC_MASK));
}

static int gaudi_hw_init(struct hl_device *hdev)
{
	int rc;

	dev_info(hdev->dev, "Starting initialization of H/W\n");

	gaudi_pre_hw_init(hdev);

	gaudi_init_pci_dma_qmans(hdev);

	gaudi_init_hbm_dma_qmans(hdev);

	rc = gaudi_init_cpu(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU\n");
		return rc;
	}

	/* SRAM scrambler must be initialized after CPU is running from HBM */
	gaudi_init_scrambler_sram(hdev);

	/* This is here just in case we are working without CPU */
	gaudi_init_scrambler_hbm(hdev);

	gaudi_init_golden_registers(hdev);

	rc = gaudi_mmu_init(hdev);
	if (rc)
		return rc;

	gaudi_init_security(hdev);

	gaudi_init_mme_qmans(hdev);

	gaudi_init_tpc_qmans(hdev);

	hdev->asic_funcs->set_clock_gating(hdev);

	gaudi_enable_timestamp(hdev);

	/* MSI must be enabled before CPU queues are initialized */
	rc = gaudi_enable_msi(hdev);
	if (rc)
		goto disable_queues;

	/* must be called after MSI was enabled */
	rc = gaudi_init_cpu_queues(hdev, GAUDI_CPU_TIMEOUT_USEC);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
			rc);
		goto disable_msi;
	}

	/* Perform read from the device to flush all configuration */
	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);

	return 0;

disable_msi:
	gaudi_disable_msi(hdev);
disable_queues:
	gaudi_disable_mme_qmans(hdev);
	gaudi_disable_pci_dma_qmans(hdev);

	return rc;
}

static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0;

	if (!hard_reset) {
		dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
		return;
	}

	if (hdev->pldm) {
		reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
		cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
	} else {
		reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
		cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
	}

	/* Set device to handle FLR by H/W as we will put the device CPU to
	 * halt mode
	 */
	WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
					PCIE_AUX_FLR_CTRL_INT_MASK_MASK));

	/* I don't know what is the state of the CPU so make sure it is
	 * stopped in any means necessary
	 */
	WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);

	msleep(cpu_timeout_ms);

	/* Tell ASIC not to re-initialize PCIe */
	WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);

	boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);

	/* H/W bug WA:
	 * rdata[31:0] = strap_read_val;
	 * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
	 */
	boot_strap = (((boot_strap & 0x7FE00000) << 1) |
			(boot_strap & 0x001FFFFF));
	WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);

	/* Restart BTL/BLR upon hard-reset */
	WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);

	WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
		1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
	dev_info(hdev->dev,
		"Issued HARD reset command, going to wait %dms\n",
		reset_timeout_ms);

	/*
	 * After hard reset, we can't poll the BTM_FSM register because the PSOC
	 * itself is in reset. Need to wait until the reset is deasserted
	 */
	msleep(reset_timeout_ms);

	status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
	if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
		dev_err(hdev->dev,
			"Timeout while waiting for device to reset 0x%x\n",
			status);

	WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);

	gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
					HW_CAP_HBM | HW_CAP_PCI_DMA |
					HW_CAP_MME | HW_CAP_TPC_MASK |
					HW_CAP_HBM_DMA | HW_CAP_PLL |
					HW_CAP_MMU |
					HW_CAP_SRAM_SCRAMBLER |
					HW_CAP_HBM_SCRAMBLER |
					HW_CAP_CLK_GATE);

	memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
}

static int gaudi_suspend(struct hl_device *hdev)
{
	int rc;

	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
	if (rc)
		dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");

	return rc;
}

static int gaudi_resume(struct hl_device *hdev)
{
	return gaudi_init_iatu(hdev);
}

static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
			VM_DONTCOPY | VM_NORESERVE;

	rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
				(dma_addr - HOST_PHYS_BASE), size);
	if (rc)
		dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);

	return rc;
}

static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 db_reg_offset, db_value, dma_qm_offset, q_off;
	int dma_id;
	bool invalid_queue = false;

	switch (hw_queue_id) {
	case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3:
		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3:
		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_2];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_3];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3:
		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_CPU_PQ:
		if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
			db_reg_offset = mmCPU_IF_PF_PQ_PI;
		else
			invalid_queue = true;
		break;

	case GAUDI_QUEUE_ID_MME_0_0:
		db_reg_offset = mmMME2_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_MME_0_1:
		db_reg_offset = mmMME2_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_MME_0_2:
		db_reg_offset = mmMME2_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_MME_0_3:
		db_reg_offset = mmMME2_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_MME_1_0:
		db_reg_offset = mmMME0_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_MME_1_1:
		db_reg_offset = mmMME0_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_MME_1_2:
		db_reg_offset = mmMME0_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_MME_1_3:
		db_reg_offset = mmMME0_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_0_0:
		db_reg_offset = mmTPC0_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_0_1:
		db_reg_offset = mmTPC0_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_0_2:
		db_reg_offset = mmTPC0_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_0_3:
		db_reg_offset = mmTPC0_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_1_0:
		db_reg_offset = mmTPC1_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_1_1:
		db_reg_offset = mmTPC1_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_1_2:
		db_reg_offset = mmTPC1_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_1_3:
		db_reg_offset = mmTPC1_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_2_0:
		db_reg_offset = mmTPC2_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_2_1:
		db_reg_offset = mmTPC2_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_2_2:
		db_reg_offset = mmTPC2_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_2_3:
		db_reg_offset = mmTPC2_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_3_0:
		db_reg_offset = mmTPC3_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_3_1:
		db_reg_offset = mmTPC3_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_3_2:
		db_reg_offset = mmTPC3_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_3_3:
		db_reg_offset = mmTPC3_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_4_0:
		db_reg_offset = mmTPC4_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_4_1:
		db_reg_offset = mmTPC4_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_4_2:
		db_reg_offset = mmTPC4_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_4_3:
		db_reg_offset = mmTPC4_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_5_0:
		db_reg_offset = mmTPC5_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_5_1:
		db_reg_offset = mmTPC5_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_5_2:
		db_reg_offset = mmTPC5_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_5_3:
		db_reg_offset = mmTPC5_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_6_0:
		db_reg_offset = mmTPC6_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_6_1:
		db_reg_offset = mmTPC6_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_6_2:
		db_reg_offset = mmTPC6_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_6_3:
		db_reg_offset = mmTPC6_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_7_0:
		db_reg_offset = mmTPC7_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_7_1:
		db_reg_offset = mmTPC7_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_7_2:
		db_reg_offset = mmTPC7_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_7_3:
		db_reg_offset = mmTPC7_QM_PQ_PI_3;
		break;

	default:
		invalid_queue = true;
	}

	if (invalid_queue) {
		/* Should never get here */
		dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
			hw_queue_id);
		return;
	}

	db_value = pi;

	/* ring the doorbell */
	WREG32(db_reg_offset, db_value);

	if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ)
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
				GAUDI_EVENT_PI_UPDATE);
}

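/*
 * For the CPU PQ, updating the PI register alone is not enough; the extra
 * GIC write raises the PI_UPDATE event, presumably because the embedded
 * CPU is interrupt-driven rather than polling its PI register.
 */
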
static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
				struct hl_bd *bd)
{
	__le64 *pbd = (__le64 *) bd;

	/* The QMANs are on the host memory so a simple copy suffice */
	pqe[0] = pbd[0];
	pqe[1] = pbd[1];
}

static void *gaudi_dma_alloc_coherent(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
						dma_handle, flags);

	/* Shift to the device's base physical address of host memory */
	if (kernel_addr)
		*dma_handle += HOST_PHYS_BASE;

	return kernel_addr;
}

static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	/* Cancel the device's base physical address of host memory */
	dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;

	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}

static void *gaudi_get_int_queue_base(struct hl_device *hdev,
				u32 queue_id, dma_addr_t *dma_handle,
				u16 *queue_len)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;

	if (queue_id >= GAUDI_QUEUE_ID_SIZE ||
			gaudi_queue_type[queue_id] != QUEUE_TYPE_INT) {
		dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
		return NULL;
	}

	q = &gaudi->internal_qmans[queue_id];
	*dma_handle = q->pq_dma_addr;
	*queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE;

	return q->pq_kernel_addr;
}

static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
				u16 len, u32 timeout, long *result)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) {
		if (result)
			*result = 0;
		return 0;
	}

	if (!timeout)
		timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;

	return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
						timeout, result);
}

static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
{
	struct packet_msg_prot *fence_pkt;
	dma_addr_t pkt_dma_addr;
	u32 fence_val, tmp, timeout_usec;
	dma_addr_t fence_dma_addr;
	u32 *fence_ptr;
	int rc;

	if (hdev->pldm)
		timeout_usec = GAUDI_PLDM_TEST_QUEUE_WAIT_USEC;
	else
		timeout_usec = GAUDI_TEST_QUEUE_WAIT_USEC;

	fence_val = GAUDI_QMAN0_FENCE_VAL;

	fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate memory for H/W queue %d testing\n",
			hw_queue_id);
		return -ENOMEM;
	}

	*fence_ptr = 0;

	fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
					sizeof(struct packet_msg_prot),
					GFP_KERNEL, &pkt_dma_addr);
	if (!fence_pkt) {
		dev_err(hdev->dev,
			"Failed to allocate packet for H/W queue %d testing\n",
			hw_queue_id);
		rc = -ENOMEM;
		goto free_fence_ptr;
	}

	tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(fence_val);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr);

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
					sizeof(struct packet_msg_prot),
					pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to send fence packet to H/W queue %d\n",
			hw_queue_id);
		goto free_pkt;
	}

	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
					1000, timeout_usec, true);

	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev,
			"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
			hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
		rc = -EIO;
	}

free_pkt:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
					pkt_dma_addr);
free_fence_ptr:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);
	return rc;
}

static int gaudi_test_cpu_queue(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	/*
	 * check capability here as send_cpu_message() won't update the result
	 * value if no capability
	 */
	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	return hl_fw_test_cpu_queue(hdev);
}

static int gaudi_test_queues(struct hl_device *hdev)
{
	int i, rc, ret_val = 0;

	for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) {
		if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
			rc = gaudi_test_queue(hdev, i);
			if (rc)
				ret_val = -EINVAL;
		}
	}

	rc = gaudi_test_cpu_queue(hdev);
	if (rc)
		ret_val = -EINVAL;

	return ret_val;
}

static void *gaudi_dma_pool_zalloc(struct hl_device *hdev, size_t size,
				gfp_t mem_flags, dma_addr_t *dma_handle)
{
	void *kernel_addr;

	if (size > GAUDI_DMA_POOL_BLK_SIZE)
		return NULL;

	kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);

	/* Shift to the device's base physical address of host memory */
	if (kernel_addr)
		*dma_handle += HOST_PHYS_BASE;

	return kernel_addr;
}

static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr,
			dma_addr_t dma_addr)
{
	/* Cancel the device's base physical address of host memory */
	dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;

	dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
}

static void *gaudi_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
					size_t size, dma_addr_t *dma_handle)
{
	return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}

static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
						size_t size, void *vaddr)
{
	hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}

static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
		return -ENOMEM;

	/* Shift to the device's base physical address of host memory */
	for_each_sg(sgl, sg, nents, i)
		sg->dma_address += HOST_PHYS_BASE;

	return 0;
}

static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/* Cancel the device's base physical address of host memory */
	for_each_sg(sgl, sg, nents, i)
		sg->dma_address -= HOST_PHYS_BASE;

	dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
}

static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
					struct sg_table *sgt)
{
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt;
	u64 len, len_next;
	dma_addr_t addr, addr_next;

	dma_desc_cnt = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg_dma_len(sg);
		addr = sg_dma_address(sg);

		if (len == 0)
			break;

		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((addr + len == addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		dma_desc_cnt++;
	}

	return dma_desc_cnt * sizeof(struct packet_lin_dma);
}

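/*
 * Merging example: two SG entries of 0x1000 bytes at DMA addresses 0x1000
 * and 0x2000 are physically contiguous, so they collapse into a single
 * 0x2000-byte descriptor as long as the combined length stays within
 * DMA_MAX_TRANSFER_SIZE. The function therefore sizes the patched packet
 * list by merged descriptors, not by raw SG entry count.
 */
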
static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				u64 addr, enum dma_data_direction dir)
{
	struct hl_userptr *userptr;
	int rc;

	if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
			parser->job_userptr_list, &userptr))
		goto already_pinned;

	userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
	if (!userptr)
		return -ENOMEM;

	rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
				userptr);
	if (rc)
		goto free_userptr;

	list_add_tail(&userptr->job_node, parser->job_userptr_list);

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, dir);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto unpin_memory;
	}

	userptr->dma_mapped = true;
	userptr->dir = dir;

already_pinned:
	parser->patched_cb_size +=
			gaudi_get_dma_desc_list_size(hdev, userptr->sgt);

	return 0;

unpin_memory:
	list_del(&userptr->job_node);
	hl_unpin_host_memory(hdev, userptr);
free_userptr:
	kfree(userptr);
	return rc;
}

static int gaudi_validate_dma_pkt_host(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				bool src_in_host)
{
	enum dma_data_direction dir;
	bool skip_host_mem_pin = false, user_memset;
	u64 addr;
	int rc = 0;

	user_memset = (le32_to_cpu(user_dma_pkt->ctl) &
			GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	if (src_in_host) {
		if (user_memset)
			skip_host_mem_pin = true;

		dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n");
		dir = DMA_TO_DEVICE;
		addr = le64_to_cpu(user_dma_pkt->src_addr);
	} else {
		dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n");
		dir = DMA_FROM_DEVICE;
		addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
				GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
				GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
	}

	if (skip_host_mem_pin)
		parser->patched_cb_size += sizeof(*user_dma_pkt);
	else
		rc = gaudi_pin_memory_before_cs(hdev, parser, user_dma_pkt,
						addr, dir);

	return rc;
}

static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	bool src_in_host = false;
	u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
			GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
			GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;

	dev_dbg(hdev->dev, "DMA packet details:\n");
	dev_dbg(hdev->dev, "source == 0x%llx\n",
		le64_to_cpu(user_dma_pkt->src_addr));
	dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr);
	dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));

	/*
	 * Special handling for DMA with size 0. Bypass all validations
	 * because no transactions will be done except for WR_COMP, which
	 * is not a security issue
	 */
	if (!le32_to_cpu(user_dma_pkt->tsize)) {
		parser->patched_cb_size += sizeof(*user_dma_pkt);
		return 0;
	}

	if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
		src_in_host = true;

	return gaudi_validate_dma_pkt_host(hdev, parser, user_dma_pkt,
						src_in_host);
}

static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
					struct hl_cs_parser *parser,
					struct packet_load_and_exe *user_pkt)
{
	u32 cfg;

	cfg = le32_to_cpu(user_pkt->cfg);

	if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
		dev_err(hdev->dev,
			"User not allowed to use Load and Execute\n");
		return -EPERM;
	}

	parser->patched_cb_size += sizeof(struct packet_load_and_exe);

	return 0;
}

static int gaudi_validate_cb(struct hl_device *hdev,
			struct hl_cs_parser *parser, bool is_mmu)
{
	u32 cb_parsed_length = 0;
	int rc = 0;

	parser->patched_cb_size = 0;

	/* cb_user_size is more than 0 so loop will always be executed */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		struct gaudi_packet *user_pkt;

		user_pkt = parser->user_cb->kernel_address + cb_parsed_length;

		pkt_id = (enum packet_id) (
				(le64_to_cpu(user_pkt->header) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		if (!validate_packet_id(pkt_id)) {
			dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		pkt_size = gaudi_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_WREG_BULK:
			dev_err(hdev->dev,
				"User not allowed to use WREG_BULK\n");
			rc = -EPERM;
			break;

		case PACKET_LOAD_AND_EXE:
			rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
				(struct packet_load_and_exe *) user_pkt);
			break;

		case PACKET_LIN_DMA:
			parser->contains_dma_pkt = true;
			if (is_mmu)
				parser->patched_cb_size += pkt_size;
			else
				rc = gaudi_validate_dma_pkt_no_mmu(hdev, parser,
					(struct packet_lin_dma *) user_pkt);
			break;

		case PACKET_WREG_32:
		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_REPEAT:
		case PACKET_FENCE:
		case PACKET_NOP:
		case PACKET_ARB_POINT:
			parser->patched_cb_size += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	/*
	 * The new CB should have space at the end for two MSG_PROT packets:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;

	return rc;
}

3911 static int gaudi_patch_dma_packet(struct hl_device *hdev,
3912 struct hl_cs_parser *parser,
3913 struct packet_lin_dma *user_dma_pkt,
3914 struct packet_lin_dma *new_dma_pkt,
3915 u32 *new_dma_pkt_size)
3917 struct hl_userptr *userptr;
3918 struct scatterlist *sg, *sg_next_iter;
3919 u32 count, dma_desc_cnt, user_wrcomp_en_mask, ctl;
3921 dma_addr_t dma_addr, dma_addr_next;
3922 u64 device_memory_addr, addr;
3923 enum dma_data_direction dir;
3924 struct sg_table *sgt;
3925 bool src_in_host = false;
3926 bool skip_host_mem_pin = false;
3929 ctl = le32_to_cpu(user_dma_pkt->ctl);
3931 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
3934 user_memset = (ctl & GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3935 GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3938 addr = le64_to_cpu(user_dma_pkt->src_addr);
3939 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3940 dir = DMA_TO_DEVICE;
3942 skip_host_mem_pin = true;
3944 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3945 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3946 dir = DMA_FROM_DEVICE;
3949 if ((!skip_host_mem_pin) &&
3950 (!hl_userptr_is_pinned(hdev, addr,
3951 le32_to_cpu(user_dma_pkt->tsize),
3952 parser->job_userptr_list, &userptr))) {
3953 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3954 addr, user_dma_pkt->tsize);
3958 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3959 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3960 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3964 user_wrcomp_en_mask = ctl & GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
3969 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3970 len = sg_dma_len(sg);
3971 dma_addr = sg_dma_address(sg);
3976 while ((count + 1) < sgt->nents) {
3977 sg_next_iter = sg_next(sg);
3978 len_next = sg_dma_len(sg_next_iter);
3979 dma_addr_next = sg_dma_address(sg_next_iter);
3984 if ((dma_addr + len == dma_addr_next) &&
3985 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3994 ctl = le32_to_cpu(user_dma_pkt->ctl);
3995 if (likely(dma_desc_cnt))
3996 ctl &= ~GAUDI_PKT_CTL_EB_MASK;
3997 ctl &= ~GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
3998 new_dma_pkt->ctl = cpu_to_le32(ctl);
3999 new_dma_pkt->tsize = cpu_to_le32(len);
4001 if (dir == DMA_TO_DEVICE) {
4002 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
4003 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
4005 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
4006 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
4010 device_memory_addr += len;
4015 if (!dma_desc_cnt) {
4017 "Error of 0 SG entries when patching DMA packet\n");
4021 /* Fix the last DMA packet - wrcomp must be as the user set it */
4023 new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask);
4025 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
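/* The loop above coalesces physically contiguous SG entries into one
 * descriptor as long as the merged length stays within
 * DMA_MAX_TRANSFER_SIZE; e.g. (illustrative addresses) two 1 MB
 * entries at 0x1000000 and 0x1100000 become a single 2 MB descriptor.
 * Each emitted descriptor is a full packet_lin_dma, which is why the
 * patched packet can outgrow the single user packet.
 */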
4030 static int gaudi_patch_cb(struct hl_device *hdev,
4031 struct hl_cs_parser *parser)
4033 u32 cb_parsed_length = 0;
4034 u32 cb_patched_cur_length = 0;
4037 /* user_cb_size is greater than 0, so the loop always executes */
4038 while (cb_parsed_length < parser->user_cb_size) {
4039 enum packet_id pkt_id;
4041 u32 new_pkt_size = 0;
4042 struct gaudi_packet *user_pkt, *kernel_pkt;
4044 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
4045 kernel_pkt = parser->patched_cb->kernel_address +
4046 cb_patched_cur_length;
4048 pkt_id = (enum packet_id) (
4049 (le64_to_cpu(user_pkt->header) &
4050 PACKET_HEADER_PACKET_ID_MASK) >>
4051 PACKET_HEADER_PACKET_ID_SHIFT);
4053 if (!validate_packet_id(pkt_id)) {
4054 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
4059 pkt_size = gaudi_packet_sizes[pkt_id];
4060 cb_parsed_length += pkt_size;
4061 if (cb_parsed_length > parser->user_cb_size) {
4063 "packet 0x%x is out of CB boundary\n", pkt_id);
4069 case PACKET_LIN_DMA:
4070 rc = gaudi_patch_dma_packet(hdev, parser,
4071 (struct packet_lin_dma *) user_pkt,
4072 (struct packet_lin_dma *) kernel_pkt,
4074 cb_patched_cur_length += new_pkt_size;
4077 case PACKET_MSG_PROT:
4079 "User not allowed to use MSG_PROT\n");
4084 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
4089 dev_err(hdev->dev, "User not allowed to use STOP\n");
4093 case PACKET_WREG_32:
4094 case PACKET_WREG_BULK:
4095 case PACKET_MSG_LONG:
4096 case PACKET_MSG_SHORT:
4100 case PACKET_ARB_POINT:
4101 case PACKET_LOAD_AND_EXE:
4102 memcpy(kernel_pkt, user_pkt, pkt_size);
4103 cb_patched_cur_length += pkt_size;
4107 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
4120 static int gaudi_parse_cb_mmu(struct hl_device *hdev,
4121 struct hl_cs_parser *parser)
4123 u64 patched_cb_handle;
4124 u32 patched_cb_size;
4125 struct hl_cb *user_cb;
4129 /* The new CB should have space at the end for two MSG_PROT packets:
4130 * 1. A packet that will act as a completion packet
4131 * 2. A packet that will generate an MSI interrupt */
4133 parser->patched_cb_size = parser->user_cb_size +
4134 sizeof(struct packet_msg_prot) * 2;
4136 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
4137 parser->patched_cb_size, false, false,
4138 &patched_cb_handle);
4142 "Failed to allocate patched CB for DMA CS %d\n",
4147 patched_cb_handle >>= PAGE_SHIFT;
4148 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4149 (u32) patched_cb_handle);
4150 /* hl_cb_get should never fail here so use kernel WARN */
4151 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4152 (u32) patched_cb_handle);
4153 if (!parser->patched_cb) {
4159 * The check that parser->user_cb_size <= parser->user_cb->size was done
4160 * in validate_queue_index().
4162 memcpy(parser->patched_cb->kernel_address,
4163 parser->user_cb->kernel_address,
4164 parser->user_cb_size);
4166 patched_cb_size = parser->patched_cb_size;
4168 /* Validate patched CB instead of user CB */
4169 user_cb = parser->user_cb;
4170 parser->user_cb = parser->patched_cb;
4171 rc = gaudi_validate_cb(hdev, parser, true);
4172 parser->user_cb = user_cb;
4175 hl_cb_put(parser->patched_cb);
4179 if (patched_cb_size != parser->patched_cb_size) {
4180 dev_err(hdev->dev, "user CB size mismatch\n");
4181 hl_cb_put(parser->patched_cb);
4188 /* Always call cb destroy here because we still hold one reference
4189 * to it from the cb_get above. After the job completes, cb_put will
4190 * release it, but here we want to remove it from the idr. */
4193 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4194 patched_cb_handle << PAGE_SHIFT);
4199 static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
4200 struct hl_cs_parser *parser)
4202 u64 patched_cb_handle;
4205 rc = gaudi_validate_cb(hdev, parser, false);
4210 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
4211 parser->patched_cb_size, false, false,
4212 &patched_cb_handle);
4215 "Failed to allocate patched CB for DMA CS %d\n", rc);
4219 patched_cb_handle >>= PAGE_SHIFT;
4220 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4221 (u32) patched_cb_handle);
4222 /* hl_cb_get should never fail here so use kernel WARN */
4223 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4224 (u32) patched_cb_handle);
4225 if (!parser->patched_cb) {
4230 rc = gaudi_patch_cb(hdev, parser);
4233 hl_cb_put(parser->patched_cb);
4237 /* Always call cb destroy here because we still hold one reference
4238 * to it from the cb_get above. After the job completes, cb_put will
4239 * release it, but here we want to remove it from the idr. */
4242 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4243 patched_cb_handle << PAGE_SHIFT);
4247 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4251 static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
4252 struct hl_cs_parser *parser)
4254 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4256 /* For internal queue jobs just check if CB address is valid */
4257 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
4258 parser->user_cb_size,
4259 asic_prop->sram_user_base_address,
4260 asic_prop->sram_end_address))
4263 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
4264 parser->user_cb_size,
4265 asic_prop->dram_user_base_address,
4266 asic_prop->dram_end_address))
4269 /* PMMU and HPMMU addresses are equal, check only one of them */
4270 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
4271 parser->user_cb_size,
4272 asic_prop->pmmu.start_addr,
4273 asic_prop->pmmu.end_addr))
4277 "CB address 0x%px + 0x%x for internal QMAN is not valid\n",
4278 parser->user_cb, parser->user_cb_size);
4283 static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4285 struct gaudi_device *gaudi = hdev->asic_specific;
4287 if (parser->queue_type == QUEUE_TYPE_INT)
4288 return gaudi_parse_cb_no_ext_queue(hdev, parser);
4290 if (gaudi->hw_cap_initialized & HW_CAP_MMU)
4291 return gaudi_parse_cb_mmu(hdev, parser);
4293 return gaudi_parse_cb_no_mmu(hdev, parser);
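/* Dispatch summary: internal-queue jobs only get the address range
 * check above (no patching); with the MMU enabled the user CB is
 * copied and the copy is validated; otherwise the CB is validated in
 * place and its LIN_DMA packets are patched with pinned host
 * addresses.
 */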
4296 static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
4297 void *kernel_address, u32 len,
4298 u64 cq_addr, u32 cq_val, u32 msi_vec,
4301 struct gaudi_device *gaudi = hdev->asic_specific;
4302 struct packet_msg_prot *cq_pkt;
4305 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
4307 tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
4308 tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
4311 tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
4313 cq_pkt->ctl = cpu_to_le32(tmp);
4314 cq_pkt->value = cpu_to_le32(cq_val);
4315 cq_pkt->addr = cpu_to_le64(cq_addr);
4319 tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
4320 tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
4321 cq_pkt->ctl = cpu_to_le32(tmp);
4322 cq_pkt->value = cpu_to_le32(1);
4324 if (!gaudi->multi_msi_mode)
4327 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
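/* Illustrative address math (assuming the MSI interrupt registers are
 * consecutive 32-bit words): msi_vec * 4 selects the msi_vec'th
 * register, e.g. vector 3 targets CFG_BASE + mmPCIE_MSI_INTR_0 + 12.
 */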
4330 static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
4332 WREG32(mmCPU_IF_EQ_RD_OFFS, val);
4335 static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
4338 struct packet_lin_dma *lin_dma_pkt;
4339 struct hl_cs_job *job;
4340 u32 cb_size, ctl, err_cause;
4344 cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
4348 lin_dma_pkt = cb->kernel_address;
4349 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4350 cb_size = sizeof(*lin_dma_pkt);
4352 ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
4353 ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
4354 ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
4355 ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
4356 ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
4358 lin_dma_pkt->ctl = cpu_to_le32(ctl);
4359 lin_dma_pkt->src_addr = cpu_to_le64(val);
4360 lin_dma_pkt->dst_addr |= cpu_to_le64(addr);
4361 lin_dma_pkt->tsize = cpu_to_le32(size);
4363 job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
4365 dev_err(hdev->dev, "Failed to allocate a new job\n");
4370 /* Verify DMA is OK */
4371 err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
4372 if (err_cause && !hdev->init_done) {
4374 "Clearing DMA0 engine from errors (cause 0x%x)\n",
4376 WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
4381 job->user_cb->cs_cnt++;
4382 job->user_cb_size = cb_size;
4383 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
4384 job->patched_cb = job->user_cb;
4385 job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
4387 hl_debugfs_add_job(hdev, job);
4389 rc = gaudi_send_job_on_qman0(hdev, job);
4390 hl_debugfs_remove_job(hdev, job);
4394 /* Verify DMA is OK */
4395 err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
4397 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
4399 if (!hdev->init_done) {
4401 "Clearing DMA0 engine from errors (cause 0x%x)\n",
4403 WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
4409 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
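/* With the MEMSET bit set, the src_addr field of the LIN_DMA packet
 * above carries the 64-bit fill pattern instead of a source address;
 * gaudi_context_switch() below uses this to fill user SRAM with
 * 0x7777777777777777ull.
 */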
4414 static void gaudi_restore_sm_registers(struct hl_device *hdev)
4418 for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) {
4419 WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
4420 WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
4421 WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
4424 for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) {
4425 WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
4426 WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
4427 WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
4430 i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4;
4432 for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4)
4433 WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
4435 i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4;
4437 for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4)
4438 WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
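/* The SOB and monitor status registers are 4 bytes apart, so the
 * loops above cover NUM_OF_SOB_IN_BLOCK << 2 bytes in steps of 4;
 * e.g. if a block holds 2048 SOBs, offsets 0, 4, ..., 8188 are
 * written.
 */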
4441 static void gaudi_restore_dma_registers(struct hl_device *hdev)
4443 u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 -
4444 mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
4447 for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
4448 u64 sob_addr = CFG_BASE +
4449 mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 +
4451 u32 dma_offset = i * DMA_CORE_OFFSET;
4453 WREG32(mmDMA0_CORE_WR_COMP_ADDR_LO + dma_offset,
4454 lower_32_bits(sob_addr));
4455 WREG32(mmDMA0_CORE_WR_COMP_ADDR_HI + dma_offset,
4456 upper_32_bits(sob_addr));
4457 WREG32(mmDMA0_CORE_WR_COMP_WDATA + dma_offset, 0x80000001);
4459 /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be
4460 * modified by the user for SRAM reduction */
4463 WREG32(mmDMA0_CORE_WR_AWUSER_31_11 + dma_offset,
4468 static void gaudi_restore_qm_registers(struct hl_device *hdev)
4473 for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
4474 qman_offset = i * DMA_QMAN_OFFSET;
4475 WREG32(mmDMA0_QM_ARB_CFG_0 + qman_offset, 0);
4478 for (i = 0 ; i < MME_NUMBER_OF_MASTER_ENGINES ; i++) {
4479 qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE);
4480 WREG32(mmMME0_QM_ARB_CFG_0 + qman_offset, 0);
4483 for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
4484 qman_offset = i * TPC_QMAN_OFFSET;
4485 WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0);
4489 static void gaudi_restore_user_registers(struct hl_device *hdev)
4491 gaudi_restore_sm_registers(hdev);
4492 gaudi_restore_dma_registers(hdev);
4493 gaudi_restore_qm_registers(hdev);
4496 static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
4498 struct asic_fixed_properties *prop = &hdev->asic_prop;
4499 u64 addr = prop->sram_user_base_address;
4500 u32 size = hdev->pldm ? 0x10000 :
4501 (prop->sram_size - SRAM_USER_BASE_OFFSET);
4502 u64 val = 0x7777777777777777ull;
4505 rc = gaudi_memset_device_memory(hdev, addr, size, val);
4507 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4511 gaudi_mmu_prepare(hdev, asid);
4513 gaudi_restore_user_registers(hdev);
4518 static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
4520 struct asic_fixed_properties *prop = &hdev->asic_prop;
4521 struct gaudi_device *gaudi = hdev->asic_specific;
4522 u64 addr = prop->mmu_pgt_addr;
4523 u32 size = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE;
4525 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
4528 return gaudi_memset_device_memory(hdev, addr, size, 0);
4531 static void gaudi_restore_phase_topology(struct hl_device *hdev)
4536 static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4538 struct asic_fixed_properties *prop = &hdev->asic_prop;
4539 struct gaudi_device *gaudi = hdev->asic_specific;
4543 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4545 if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
4546 (hdev->clock_gating_mask &
4547 GAUDI_CLK_GATE_DEBUGFS_MASK)) {
4549 dev_err_ratelimited(hdev->dev,
4550 "Can't read register - clock gating is enabled!\n");
4553 *val = RREG32(addr - CFG_BASE);
4556 } else if ((addr >= SRAM_BASE_ADDR) &&
4557 (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
4558 *val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
4559 (addr - SRAM_BASE_ADDR));
4560 } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
4561 u64 bar_base_addr = DRAM_PHYS_BASE +
4562 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4564 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
4565 if (hbm_bar_addr != U64_MAX) {
4566 *val = readl(hdev->pcie_bar[HBM_BAR_ID] +
4567 (addr - bar_base_addr));
4569 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
4572 if (hbm_bar_addr == U64_MAX)
4574 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4575 *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
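/* Address-space dispatch shared by the four debugfs accessors in this
 * block: CFG registers go through RREG32/WREG32, SRAM through a fixed
 * PCI BAR, DRAM through the sliding HBM BAR (restored after use), and
 * host physical memory through phys_to_virt() only when no IOMMU is
 * present.
 */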
4583 static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4585 struct asic_fixed_properties *prop = &hdev->asic_prop;
4586 struct gaudi_device *gaudi = hdev->asic_specific;
4590 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4592 if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
4593 (hdev->clock_gating_mask &
4594 GAUDI_CLK_GATE_DEBUGFS_MASK)) {
4596 dev_err_ratelimited(hdev->dev,
4597 "Can't write register - clock gating is enabled!\n");
4600 WREG32(addr - CFG_BASE, val);
4603 } else if ((addr >= SRAM_BASE_ADDR) &&
4604 (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
4605 writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
4606 (addr - SRAM_BASE_ADDR));
4607 } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
4608 u64 bar_base_addr = DRAM_PHYS_BASE +
4609 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4611 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
4612 if (hbm_bar_addr != U64_MAX) {
4613 writel(val, hdev->pcie_bar[HBM_BAR_ID] +
4614 (addr - bar_base_addr));
4616 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
4619 if (hbm_bar_addr == U64_MAX)
4621 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4622 *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4630 static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
4632 struct asic_fixed_properties *prop = &hdev->asic_prop;
4633 struct gaudi_device *gaudi = hdev->asic_specific;
4637 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4639 if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
4640 (hdev->clock_gating_mask &
4641 GAUDI_CLK_GATE_DEBUGFS_MASK)) {
4643 dev_err_ratelimited(hdev->dev,
4644 "Can't read register - clock gating is enabled!\n");
4647 u32 val_l = RREG32(addr - CFG_BASE);
4648 u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
4650 *val = (((u64) val_h) << 32) | val_l;
4653 } else if ((addr >= SRAM_BASE_ADDR) &&
4654 (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
4655 *val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
4656 (addr - SRAM_BASE_ADDR));
4658 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
4659 u64 bar_base_addr = DRAM_PHYS_BASE +
4660 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4662 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
4663 if (hbm_bar_addr != U64_MAX) {
4664 *val = readq(hdev->pcie_bar[HBM_BAR_ID] +
4665 (addr - bar_base_addr));
4667 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
4670 if (hbm_bar_addr == U64_MAX)
4672 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4673 *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
4681 static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
4683 struct asic_fixed_properties *prop = &hdev->asic_prop;
4684 struct gaudi_device *gaudi = hdev->asic_specific;
4688 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4690 if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
4691 (hdev->clock_gating_mask &
4692 GAUDI_CLK_GATE_DEBUGFS_MASK)) {
4694 dev_err_ratelimited(hdev->dev,
4695 "Can't write register - clock gating is enabled!\n");
4698 WREG32(addr - CFG_BASE, lower_32_bits(val));
4699 WREG32(addr + sizeof(u32) - CFG_BASE,
4700 upper_32_bits(val));
4703 } else if ((addr >= SRAM_BASE_ADDR) &&
4704 (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
4705 writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
4706 (addr - SRAM_BASE_ADDR));
4708 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
4709 u64 bar_base_addr = DRAM_PHYS_BASE +
4710 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4712 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
4713 if (hbm_bar_addr != U64_MAX) {
4714 writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
4715 (addr - bar_base_addr));
4717 hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
4720 if (hbm_bar_addr == U64_MAX)
4722 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4723 *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4731 static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
4733 struct gaudi_device *gaudi = hdev->asic_specific;
4735 if (hdev->hard_reset_pending)
4738 return readq(hdev->pcie_bar[HBM_BAR_ID] +
4739 (addr - gaudi->hbm_bar_cur_addr));
4742 static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4744 struct gaudi_device *gaudi = hdev->asic_specific;
4746 if (hdev->hard_reset_pending)
4749 writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
4750 (addr - gaudi->hbm_bar_cur_addr));
4753 void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
4755 /* mask to zero the MMBP and ASID bits */
4756 WREG32_AND(reg, ~0x7FF);
4757 WREG32_OR(reg, asid);
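/* Example (assuming the ASID occupies the low bits and MMBP sits just
 * above it, per the 0x7FF mask): for asid 5 the field becomes
 * (old & ~0x7FF) | 5, i.e. MMBP cleared and ASID = 5.
 */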
4760 static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
4762 struct gaudi_device *gaudi = hdev->asic_specific;
4764 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
4767 if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
4768 WARN(1, "asid %u is too big\n", asid);
4772 mutex_lock(&gaudi->clk_gate_mutex);
4774 hdev->asic_funcs->disable_clock_gating(hdev);
4776 gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid);
4777 gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid);
4778 gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_2, asid);
4779 gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_3, asid);
4780 gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_4, asid);
4782 gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_0, asid);
4783 gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_1, asid);
4784 gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_2, asid);
4785 gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_3, asid);
4786 gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_4, asid);
4788 gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_0, asid);
4789 gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_1, asid);
4790 gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_2, asid);
4791 gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_3, asid);
4792 gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_4, asid);
4794 gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_0, asid);
4795 gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_1, asid);
4796 gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_2, asid);
4797 gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_3, asid);
4798 gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_4, asid);
4800 gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_0, asid);
4801 gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_1, asid);
4802 gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_2, asid);
4803 gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_3, asid);
4804 gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_4, asid);
4806 gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_0, asid);
4807 gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_1, asid);
4808 gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_2, asid);
4809 gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_3, asid);
4810 gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_4, asid);
4812 gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_0, asid);
4813 gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_1, asid);
4814 gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_2, asid);
4815 gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_3, asid);
4816 gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_4, asid);
4818 gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_0, asid);
4819 gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_1, asid);
4820 gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_2, asid);
4821 gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_3, asid);
4822 gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_4, asid);
4824 gaudi_mmu_prepare_reg(hdev, mmDMA0_CORE_NON_SECURE_PROPS, asid);
4825 gaudi_mmu_prepare_reg(hdev, mmDMA1_CORE_NON_SECURE_PROPS, asid);
4826 gaudi_mmu_prepare_reg(hdev, mmDMA2_CORE_NON_SECURE_PROPS, asid);
4827 gaudi_mmu_prepare_reg(hdev, mmDMA3_CORE_NON_SECURE_PROPS, asid);
4828 gaudi_mmu_prepare_reg(hdev, mmDMA4_CORE_NON_SECURE_PROPS, asid);
4829 gaudi_mmu_prepare_reg(hdev, mmDMA5_CORE_NON_SECURE_PROPS, asid);
4830 gaudi_mmu_prepare_reg(hdev, mmDMA6_CORE_NON_SECURE_PROPS, asid);
4831 gaudi_mmu_prepare_reg(hdev, mmDMA7_CORE_NON_SECURE_PROPS, asid);
4833 gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_0, asid);
4834 gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_1, asid);
4835 gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_2, asid);
4836 gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_3, asid);
4837 gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_4, asid);
4838 gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_ARUSER_LO, asid);
4839 gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_AWUSER_LO, asid);
4841 gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_0, asid);
4842 gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_1, asid);
4843 gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_2, asid);
4844 gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_3, asid);
4845 gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_4, asid);
4846 gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_ARUSER_LO, asid);
4847 gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_AWUSER_LO, asid);
4849 gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_0, asid);
4850 gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_1, asid);
4851 gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_2, asid);
4852 gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_3, asid);
4853 gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_4, asid);
4854 gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_ARUSER_LO, asid);
4855 gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_AWUSER_LO, asid);
4857 gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_0, asid);
4858 gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_1, asid);
4859 gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_2, asid);
4860 gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_3, asid);
4861 gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_4, asid);
4862 gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_ARUSER_LO, asid);
4863 gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_AWUSER_LO, asid);
4865 gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_0, asid);
4866 gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_1, asid);
4867 gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_2, asid);
4868 gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_3, asid);
4869 gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_4, asid);
4870 gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_ARUSER_LO, asid);
4871 gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_AWUSER_LO, asid);
4873 gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_0, asid);
4874 gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_1, asid);
4875 gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_2, asid);
4876 gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_3, asid);
4877 gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_4, asid);
4878 gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_ARUSER_LO, asid);
4879 gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_AWUSER_LO, asid);
4881 gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_0, asid);
4882 gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_1, asid);
4883 gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_2, asid);
4884 gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_3, asid);
4885 gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_4, asid);
4886 gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_ARUSER_LO, asid);
4887 gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_AWUSER_LO, asid);
4889 gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_0, asid);
4890 gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_1, asid);
4891 gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_2, asid);
4892 gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_3, asid);
4893 gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_4, asid);
4894 gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_ARUSER_LO, asid);
4895 gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_AWUSER_LO, asid);
4897 gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_0, asid);
4898 gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_1, asid);
4899 gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_2, asid);
4900 gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_3, asid);
4901 gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_4, asid);
4902 gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_0, asid);
4903 gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_1, asid);
4904 gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_2, asid);
4905 gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_3, asid);
4906 gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_4, asid);
4908 gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER0, asid);
4909 gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER1, asid);
4910 gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER0, asid);
4911 gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER1, asid);
4912 gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER0, asid);
4913 gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER1, asid);
4914 gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER0, asid);
4915 gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER1, asid);
4916 gaudi_mmu_prepare_reg(hdev, mmMME0_ACC_WBC, asid);
4917 gaudi_mmu_prepare_reg(hdev, mmMME1_ACC_WBC, asid);
4918 gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
4919 gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
4921 hdev->asic_funcs->set_clock_gating(hdev);
4923 mutex_unlock(&gaudi->clk_gate_mutex);
4926 static int gaudi_send_job_on_qman0(struct hl_device *hdev,
4927 struct hl_cs_job *job)
4929 struct packet_msg_prot *fence_pkt;
4931 dma_addr_t fence_dma_addr;
4933 u32 tmp, timeout, dma_offset;
4937 timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC;
4939 timeout = HL_DEVICE_TIMEOUT_USEC;
4941 if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
4942 dev_err_ratelimited(hdev->dev,
4943 "Can't send driver job on QMAN0 because the device is not idle\n");
4947 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
4951 "Failed to allocate fence memory for QMAN0\n");
4955 cb = job->patched_cb;
4957 fence_pkt = cb->kernel_address +
4958 job->job_cb_size - sizeof(struct packet_msg_prot);
4960 tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
4961 tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
4962 tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
4964 fence_pkt->ctl = cpu_to_le32(tmp);
4965 fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL);
4966 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
4968 dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
4970 WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
4972 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
4973 job->job_cb_size, cb->bus_address);
4975 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
4976 goto free_fence_ptr;
4979 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
4980 (tmp == GAUDI_QMAN0_FENCE_VAL), 1000,
4983 hl_hw_queue_inc_ci_kernel(hdev, GAUDI_QUEUE_ID_DMA_0_0);
4985 if (rc == -ETIMEDOUT) {
4986 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
4987 goto free_fence_ptr;
4991 WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
4992 ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
4994 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
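/* Flow summary: a MSG_PROT fence packet is placed at the end of the
 * patched CB, the CB is sent on QMAN0 without a completion entry, and
 * the driver polls the fence memory until it reads
 * GAUDI_QMAN0_FENCE_VAL (or times out), then restores the DMA0
 * protection bit it set before submission.
 */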
4999 static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size)
5001 if (event_type >= GAUDI_EVENT_SIZE)
5002 goto event_not_supported;
5004 if (!gaudi_irq_map_table[event_type].valid)
5005 goto event_not_supported;
5007 snprintf(desc, size, "%s", gaudi_irq_map_table[event_type].name);
5011 event_not_supported:
5012 snprintf(desc, size, "N/A");
5015 static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
5016 u32 x_y, bool is_write)
5018 u32 dma_id[2], dma_offset, err_cause[2], mask, i;
5020 mask = is_write ? DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK :
5021 DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK;
5024 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
5025 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
5029 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
5030 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
5034 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
5035 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
5039 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
5040 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
5045 goto unknown_initiator;
5048 for (i = 0 ; i < 2 ; i++) {
5049 dma_offset = dma_id[i] * DMA_CORE_OFFSET;
5050 err_cause[i] = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
5054 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
5055 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
5056 if ((err_cause[0] & mask) && !(err_cause[1] & mask))
5058 else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
5061 return "DMA0 or DMA2";
5062 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
5063 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
5064 if ((err_cause[0] & mask) && !(err_cause[1] & mask))
5066 else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
5069 return "DMA1 or DMA3";
5070 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
5071 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
5072 if ((err_cause[0] & mask) && !(err_cause[1] & mask))
5074 else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
5077 return "DMA4 or DMA6";
5078 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
5079 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
5080 if ((err_cause[0] & mask) && !(err_cause[1] & mask))
5082 else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
5085 return "DMA5 or DMA7";
5089 return "unknown initiator";
5092 static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
5095 u32 val, x_y, axi_id;
5097 val = is_write ? RREG32(mmMMU_UP_RAZWI_WRITE_ID) :
5098 RREG32(mmMMU_UP_RAZWI_READ_ID);
5099 x_y = val & ((RAZWI_INITIATOR_Y_MASK << RAZWI_INITIATOR_Y_SHIFT) |
5100 (RAZWI_INITIATOR_X_MASK << RAZWI_INITIATOR_X_SHIFT));
5101 axi_id = val & (RAZWI_INITIATOR_AXI_ID_MASK <<
5102 RAZWI_INITIATOR_AXI_ID_SHIFT);
5105 case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
5106 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
5108 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
5111 case RAZWI_INITIATOR_ID_X_Y_TPC1:
5113 case RAZWI_INITIATOR_ID_X_Y_MME0_0:
5114 case RAZWI_INITIATOR_ID_X_Y_MME0_1:
5116 case RAZWI_INITIATOR_ID_X_Y_MME1_0:
5117 case RAZWI_INITIATOR_ID_X_Y_MME1_1:
5119 case RAZWI_INITIATOR_ID_X_Y_TPC2:
5121 case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
5122 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
5124 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
5126 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
5128 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PSOC))
5131 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
5132 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
5133 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
5134 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
5135 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
5136 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
5137 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
5138 case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
5139 return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write);
5140 case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
5141 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
5143 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
5145 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
5148 case RAZWI_INITIATOR_ID_X_Y_TPC5:
5150 case RAZWI_INITIATOR_ID_X_Y_MME2_0:
5151 case RAZWI_INITIATOR_ID_X_Y_MME2_1:
5153 case RAZWI_INITIATOR_ID_X_Y_MME3_0:
5154 case RAZWI_INITIATOR_ID_X_Y_MME3_1:
5156 case RAZWI_INITIATOR_ID_X_Y_TPC6:
5158 case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
5159 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
5161 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
5163 if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
5171 "Unknown RAZWI initiator ID 0x%x [Y=%d, X=%d, AXI_ID=%d]\n",
5173 (val >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK,
5174 (val >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK,
5175 (val >> RAZWI_INITIATOR_AXI_ID_SHIFT) &
5176 RAZWI_INITIATOR_AXI_ID_MASK);
5178 return "unknown initiator";
5181 static void gaudi_print_razwi_info(struct hl_device *hdev)
5183 if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
5184 dev_err_ratelimited(hdev->dev,
5185 "RAZWI event caused by illegal write of %s\n",
5186 gaudi_get_razwi_initiator_name(hdev, true));
5187 WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
5190 if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
5191 dev_err_ratelimited(hdev->dev,
5192 "RAZWI event caused by illegal read of %s\n",
5193 gaudi_get_razwi_initiator_name(hdev, false));
5194 WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
5198 static void gaudi_print_mmu_error_info(struct hl_device *hdev)
5200 struct gaudi_device *gaudi = hdev->asic_specific;
5204 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
5207 val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
5208 if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
5209 addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
5211 addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
5213 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
5216 WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
5219 val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
5220 if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
5221 addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
5223 addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
5225 dev_err_ratelimited(hdev->dev,
5226 "MMU access error on va 0x%llx\n", addr);
5228 WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
5233 * +-------------------+------------------------------------------------------+
5234 * | Configuration Reg | Description |
5236 * +-------------------+------------------------------------------------------+
5237 * | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)|
5238 * | |0xF30 memory wrappers 31:0 (MSB to LSB) |
5239 * | |0xF34 memory wrappers 63:32 |
5240 * | |0xF38 memory wrappers 95:64 |
5241 * | |0xF3C memory wrappers 127:96 |
5242 * +-------------------+------------------------------------------------------+
5243 * | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)|
5244 * | |0xF40 memory wrappers 31:0 (MSB to LSB) |
5245 * | |0xF44 memory wrappers 63:32 |
5246 * | |0xF48 memory wrappers 95:64 |
5247 * | |0xF4C memory wrappers 127:96 |
5248 * +-------------------+------------------------------------------------------+
5250 static int gaudi_extract_ecc_info(struct hl_device *hdev,
5251 struct ecc_info_extract_params *params, u64 *ecc_address,
5252 u64 *ecc_syndrom, u8 *memory_wrapper_idx)
5254 struct gaudi_device *gaudi = hdev->asic_specific;
5255 u32 i, num_mem_regs, reg, err_bit;
5256 u64 err_addr, err_word = 0;
5259 num_mem_regs = DIV_ROUND_UP(params->num_memories, 32);
5262 if (params->block_address >= CFG_BASE)
5263 params->block_address -= CFG_BASE;
5266 err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET;
5268 err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET;
5270 if (params->disable_clock_gating) {
5271 mutex_lock(&gaudi->clk_gate_mutex);
5272 hdev->asic_funcs->disable_clock_gating(hdev);
5275 /* Set invalid wrapper index */
5276 *memory_wrapper_idx = 0xFF;
5278 /* Iterate through memory wrappers, a single bit must be set */
5279 for (i = 0 ; i < num_mem_regs ; i++) {
5281 err_word = RREG32(err_addr);
5283 err_bit = __ffs(err_word);
5284 *memory_wrapper_idx = err_bit + (32 * i);
5289 if (*memory_wrapper_idx == 0xFF) {
5290 dev_err(hdev->dev, "ECC error information cannot be found\n");
5292 goto enable_clk_gate;
5295 WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET,
5296 *memory_wrapper_idx);
5299 RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET);
5301 RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET);
5303 /* Clear error indication */
5304 reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET);
5306 reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1);
5308 reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1);
5310 WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg);
5313 if (params->disable_clock_gating) {
5314 hdev->asic_funcs->set_clock_gating(hdev);
5316 mutex_unlock(&gaudi->clk_gate_mutex);
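/* Worked example of the extraction above (illustrative values): with
 * num_memories = 90, three 32-bit registers are scanned; if the
 * second one reads 0x00000400, __ffs() returns 10 and the wrapper
 * index is 10 + 32 * 1 = 42.
 */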
5322 static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
5323 const char *qm_name,
5327 u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
5330 /* Iterate through all stream GLBL_STS1 registers + Lower CP */
5331 for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
5332 glbl_sts_clr_val = 0;
5333 glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);
5338 if (i == QMAN_STREAMS)
5339 snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
5341 snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
5343 for (j = 0 ; j < GAUDI_NUM_OF_QM_ERR_CAUSE ; j++) {
5344 if (glbl_sts_val & BIT(j)) {
5345 dev_err_ratelimited(hdev->dev,
5346 "%s %s. err cause: %s\n",
5348 gaudi_qman_error_cause[j]);
5349 glbl_sts_clr_val |= BIT(j);
5353 /* Write 1 to clear errors */
5354 WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
5357 arb_err_val = RREG32(arb_err_addr);
5362 for (j = 0 ; j < GAUDI_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
5363 if (arb_err_val & BIT(j)) {
5364 dev_err_ratelimited(hdev->dev,
5365 "%s ARB_ERR. err cause: %s\n",
5367 gaudi_qman_arb_error_cause[j]);
5372 static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
5373 struct hl_eq_ecc_data *ecc_data)
5375 struct ecc_info_extract_params params;
5376 u64 ecc_address = 0, ecc_syndrom = 0;
5377 u8 index, memory_wrapper_idx = 0;
5378 bool extract_info_from_fw;
5381 switch (event_type) {
5382 case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
5383 case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
5384 extract_info_from_fw = true;
5386 case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
5387 index = event_type - GAUDI_EVENT_TPC0_SERR;
5388 params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
5389 params.num_memories = 90;
5390 params.derr = false;
5391 params.disable_clock_gating = true;
5392 extract_info_from_fw = false;
5394 case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
5395 index = event_type - GAUDI_EVENT_TPC0_DERR;
5396 params.block_address =
5397 mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
5398 params.num_memories = 90;
5400 params.disable_clock_gating = true;
5401 extract_info_from_fw = false;
5403 case GAUDI_EVENT_MME0_ACC_SERR:
5404 case GAUDI_EVENT_MME1_ACC_SERR:
5405 case GAUDI_EVENT_MME2_ACC_SERR:
5406 case GAUDI_EVENT_MME3_ACC_SERR:
5407 index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
5408 params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
5409 params.num_memories = 128;
5410 params.derr = false;
5411 params.disable_clock_gating = true;
5412 extract_info_from_fw = false;
5414 case GAUDI_EVENT_MME0_ACC_DERR:
5415 case GAUDI_EVENT_MME1_ACC_DERR:
5416 case GAUDI_EVENT_MME2_ACC_DERR:
5417 case GAUDI_EVENT_MME3_ACC_DERR:
5418 index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
5419 params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
5420 params.num_memories = 128;
5422 params.disable_clock_gating = true;
5423 extract_info_from_fw = false;
5425 case GAUDI_EVENT_MME0_SBAB_SERR:
5426 case GAUDI_EVENT_MME1_SBAB_SERR:
5427 case GAUDI_EVENT_MME2_SBAB_SERR:
5428 case GAUDI_EVENT_MME3_SBAB_SERR:
5429 index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
5430 params.block_address =
5431 mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
5432 params.num_memories = 33;
5433 params.derr = false;
5434 params.disable_clock_gating = true;
5435 extract_info_from_fw = false;
5437 case GAUDI_EVENT_MME0_SBAB_DERR:
5438 case GAUDI_EVENT_MME1_SBAB_DERR:
5439 case GAUDI_EVENT_MME2_SBAB_DERR:
5440 case GAUDI_EVENT_MME3_SBAB_DERR:
5441 index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
5442 params.block_address =
5443 mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
5444 params.num_memories = 33;
5446 params.disable_clock_gating = true;
5447 extract_info_from_fw = false;
5453 if (extract_info_from_fw) {
5454 ecc_address = le64_to_cpu(ecc_data->ecc_address);
5455 ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
5456 memory_wrapper_idx = ecc_data->memory_wrapper_idx;
5458 rc = gaudi_extract_ecc_info(hdev, ¶ms, &ecc_address,
5459 &ecc_syndrom, &memory_wrapper_idx);
5465 "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u\n",
5466 ecc_address, ecc_syndrom, memory_wrapper_idx);
5469 static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
5471 u64 glbl_sts_addr, arb_err_addr;
5475 switch (event_type) {
5476 case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
5477 index = event_type - GAUDI_EVENT_TPC0_QM;
5479 mmTPC0_QM_GLBL_STS1_0 + index * TPC_QMAN_OFFSET;
5481 mmTPC0_QM_ARB_ERR_CAUSE + index * TPC_QMAN_OFFSET;
5482 snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
5484 case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
5485 index = event_type - GAUDI_EVENT_MME0_QM;
5487 mmMME0_QM_GLBL_STS1_0 + index * MME_QMAN_OFFSET;
5489 mmMME0_QM_ARB_ERR_CAUSE + index * MME_QMAN_OFFSET;
5490 snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
5492 case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
5493 index = event_type - GAUDI_EVENT_DMA0_QM;
5495 mmDMA0_QM_GLBL_STS1_0 + index * DMA_QMAN_OFFSET;
5497 mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET;
5498 snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
5504 gaudi_handle_qman_err_generic(hdev, desc, glbl_sts_addr, arb_err_addr);
5507 static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
5512 gaudi_get_event_desc(event_type, desc, sizeof(desc));
5513 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
5517 gaudi_print_razwi_info(hdev);
5518 gaudi_print_mmu_error_info(hdev);
5522 static int gaudi_soft_reset_late_init(struct hl_device *hdev)
5524 struct gaudi_device *gaudi = hdev->asic_specific;
5526 /* Unmask all IRQs since some could have been received
5527 * during the soft reset */
5529 return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events));
5532 static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device)
5535 u32 base, val, val2;
5537 base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
5538 for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
5539 val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
5540 val = (val & 0xFF) | ((val >> 8) & 0xFF);
5544 "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
5545 device, ch * 2, val & 0x1, (val >> 1) & 0x1,
5546 (val >> 2) & 0x1, (val >> 3) & 0x1,
5549 val2 = RREG32(base + ch * 0x1000 + 0x060);
5551 "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
5553 RREG32(base + ch * 0x1000 + 0x064),
5554 (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
5555 (val2 & 0xFF0000) >> 16,
5556 (val2 & 0xFF000000) >> 24);
5559 val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF);
5560 val = (val & 0xFF) | ((val >> 8) & 0xFF);
5564 "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
5565 device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1,
5566 (val >> 2) & 0x1, (val >> 3) & 0x1,
5569 val2 = RREG32(base + ch * 0x1000 + 0x070);
5571 "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
5573 RREG32(base + ch * 0x1000 + 0x074),
5574 (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
5575 (val2 & 0xFF0000) >> 16,
5576 (val2 & 0xFF000000) >> 24);
5579 /* Clear interrupts */
5580 RMWREG32(base + (ch * 0x1000) + 0x060, 0x1C8, 0x1FF);
5581 RMWREG32(base + (ch * 0x1000) + 0x070, 0x1C8, 0x1FF);
5582 WREG32(base + (ch * 0x1000) + 0x06C, 0x1F1F);
5583 WREG32(base + (ch * 0x1000) + 0x07C, 0x1F1F);
5584 RMWREG32(base + (ch * 0x1000) + 0x060, 0x0, 0xF);
5585 RMWREG32(base + (ch * 0x1000) + 0x070, 0x0, 0xF);
5588 val = RREG32(base + 0x8F30);
5589 val2 = RREG32(base + 0x8F34);
5593 "HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n",
5596 val = RREG32(base + 0x8F40);
5597 val2 = RREG32(base + 0x8F44);
5601 "HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n",
5608 static int gaudi_hbm_event_to_dev(u16 hbm_event_type)
5610 switch (hbm_event_type) {
5611 case GAUDI_EVENT_HBM0_SPI_0:
5612 case GAUDI_EVENT_HBM0_SPI_1:
5614 case GAUDI_EVENT_HBM1_SPI_0:
5615 case GAUDI_EVENT_HBM1_SPI_1:
5617 case GAUDI_EVENT_HBM2_SPI_0:
5618 case GAUDI_EVENT_HBM2_SPI_1:
5620 case GAUDI_EVENT_HBM3_SPI_0:
5621 case GAUDI_EVENT_HBM3_SPI_1:
5627 /* Should never happen */
5631 static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
5632 char *interrupt_name)
5634 struct gaudi_device *gaudi = hdev->asic_specific;
5635 u32 tpc_offset = tpc_id * TPC_CFG_OFFSET, tpc_interrupts_cause, i;
5636 bool soft_reset_required = false;
5638 /* Accessing the TPC_INTR_CAUSE registers requires disabling clock
5639 * gating; it cannot be done from CPU-CP, so the driver does it instead. */
5643 mutex_lock(&gaudi->clk_gate_mutex);
5645 hdev->asic_funcs->disable_clock_gating(hdev);
5647 tpc_interrupts_cause = RREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset) &
5648 TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK;
5650 for (i = 0 ; i < GAUDI_NUM_OF_TPC_INTR_CAUSE ; i++)
5651 if (tpc_interrupts_cause & BIT(i)) {
5652 dev_err_ratelimited(hdev->dev,
5653 "TPC%d_%s interrupt cause: %s\n",
5654 tpc_id, interrupt_name,
5655 gaudi_tpc_interrupts_cause[i]);
5656 /* If this is QM error, we need to soft-reset */
5658 soft_reset_required = true;
5661 /* Clear interrupts */
5662 WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
5664 hdev->asic_funcs->set_clock_gating(hdev);
5666 mutex_unlock(&gaudi->clk_gate_mutex);
5668 return soft_reset_required;
5671 static int tpc_dec_event_to_tpc_id(u16 tpc_dec_event_type)
5673 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1;
5676 static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
5678 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
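/* The divisors above encode the event-ID spacing: the >> 1 implies
 * DEC event IDs are 2 apart per TPC, and the / 6 implies KRN_ERR IDs
 * are 6 apart; e.g. (GAUDI_EVENT_TPC2_KRN_ERR -
 * GAUDI_EVENT_TPC0_KRN_ERR) / 6 = 12 / 6 = 2.
 */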
5681 static void gaudi_print_clk_change_info(struct hl_device *hdev,
5684 switch (event_type) {
5685 case GAUDI_EVENT_FIX_POWER_ENV_S:
5686 hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
5687 dev_info_ratelimited(hdev->dev,
5688 "Clock throttling due to power consumption\n");
5691 case GAUDI_EVENT_FIX_POWER_ENV_E:
5692 hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
5693 dev_info_ratelimited(hdev->dev,
5694 "Power envelop is safe, back to optimal clock\n");
5697 case GAUDI_EVENT_FIX_THERMAL_ENV_S:
5698 hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
5699 dev_info_ratelimited(hdev->dev,
5700 "Clock throttling due to overheating\n");
5703 case GAUDI_EVENT_FIX_THERMAL_ENV_E:
5704 hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
5705 dev_info_ratelimited(hdev->dev,
5706 "Thermal envelop is safe, back to optimal clock\n");
5710 dev_err(hdev->dev, "Received invalid clock change event %d\n",
5716 static void gaudi_handle_eqe(struct hl_device *hdev,
5717 struct hl_eq_entry *eq_entry)
5719 struct gaudi_device *gaudi = hdev->asic_specific;
5720 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
5721 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
5722 >> EQ_CTL_EVENT_TYPE_SHIFT);
5724 bool reset_required;
5726 if (event_type >= GAUDI_EVENT_SIZE) {
5727 dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
5728 event_type, GAUDI_EVENT_SIZE - 1);
5732 gaudi->events_stat[event_type]++;
5733 gaudi->events_stat_aggregate[event_type]++;
5735 switch (event_type) {
5736 case GAUDI_EVENT_PCIE_CORE_DERR:
5737 case GAUDI_EVENT_PCIE_IF_DERR:
5738 case GAUDI_EVENT_PCIE_PHY_DERR:
5739 case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
5740 case GAUDI_EVENT_MME0_ACC_DERR:
5741 case GAUDI_EVENT_MME0_SBAB_DERR:
5742 case GAUDI_EVENT_MME1_ACC_DERR:
5743 case GAUDI_EVENT_MME1_SBAB_DERR:
5744 case GAUDI_EVENT_MME2_ACC_DERR:
5745 case GAUDI_EVENT_MME2_SBAB_DERR:
5746 case GAUDI_EVENT_MME3_ACC_DERR:
5747 case GAUDI_EVENT_MME3_SBAB_DERR:
5748 case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
5750 case GAUDI_EVENT_CPU_IF_ECC_DERR:
5751 case GAUDI_EVENT_PSOC_MEM_DERR:
5752 case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
5753 case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
5754 case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
5755 case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
5756 case GAUDI_EVENT_MMU_DERR:
5757 gaudi_print_irq_info(hdev, event_type, true);
5758 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
5759 if (hdev->hard_reset_on_fw_events)
5760 hl_device_reset(hdev, true, false);
5763 case GAUDI_EVENT_GIC500:
5764 case GAUDI_EVENT_AXI_ECC:
5765 case GAUDI_EVENT_L2_RAM_ECC:
5766 case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
5767 gaudi_print_irq_info(hdev, event_type, false);
5768 if (hdev->hard_reset_on_fw_events)
5769 hl_device_reset(hdev, true, false);
5772 case GAUDI_EVENT_HBM0_SPI_0:
5773 case GAUDI_EVENT_HBM1_SPI_0:
5774 case GAUDI_EVENT_HBM2_SPI_0:
5775 case GAUDI_EVENT_HBM3_SPI_0:
5776 gaudi_print_irq_info(hdev, event_type, false);
5777 gaudi_hbm_read_interrupts(hdev,
5778 gaudi_hbm_event_to_dev(event_type));
5779 if (hdev->hard_reset_on_fw_events)
5780 hl_device_reset(hdev, true, false);
5783 case GAUDI_EVENT_HBM0_SPI_1:
5784 case GAUDI_EVENT_HBM1_SPI_1:
5785 case GAUDI_EVENT_HBM2_SPI_1:
5786 case GAUDI_EVENT_HBM3_SPI_1:
5787 gaudi_print_irq_info(hdev, event_type, false);
5788 gaudi_hbm_read_interrupts(hdev,
5789 gaudi_hbm_event_to_dev(event_type));
5792 case GAUDI_EVENT_TPC0_DEC:
5793 case GAUDI_EVENT_TPC1_DEC:
5794 case GAUDI_EVENT_TPC2_DEC:
5795 case GAUDI_EVENT_TPC3_DEC:
5796 case GAUDI_EVENT_TPC4_DEC:
5797 case GAUDI_EVENT_TPC5_DEC:
5798 case GAUDI_EVENT_TPC6_DEC:
5799 case GAUDI_EVENT_TPC7_DEC:
5800 gaudi_print_irq_info(hdev, event_type, true);
5801 reset_required = gaudi_tpc_read_interrupts(hdev,
5802 tpc_dec_event_to_tpc_id(event_type),
5803 "AXI_SLV_DEC_Error");
5804 if (reset_required) {
5805 dev_err(hdev->dev, "hard reset required due to %s\n",
5806 gaudi_irq_map_table[event_type].name);
5808 if (hdev->hard_reset_on_fw_events)
5809 hl_device_reset(hdev, true, false);
5811 hl_fw_unmask_irq(hdev, event_type);
5815 case GAUDI_EVENT_TPC0_KRN_ERR:
5816 case GAUDI_EVENT_TPC1_KRN_ERR:
5817 case GAUDI_EVENT_TPC2_KRN_ERR:
5818 case GAUDI_EVENT_TPC3_KRN_ERR:
5819 case GAUDI_EVENT_TPC4_KRN_ERR:
5820 case GAUDI_EVENT_TPC5_KRN_ERR:
5821 case GAUDI_EVENT_TPC6_KRN_ERR:
5822 case GAUDI_EVENT_TPC7_KRN_ERR:
5823 gaudi_print_irq_info(hdev, event_type, true);
5824 reset_required = gaudi_tpc_read_interrupts(hdev,
5825 tpc_krn_event_to_tpc_id(event_type),
5827 if (reset_required) {
5828 dev_err(hdev->dev, "hard reset required due to %s\n",
5829 gaudi_irq_map_table[event_type].name);
5831 if (hdev->hard_reset_on_fw_events)
5832 hl_device_reset(hdev, true, false);
5834 hl_fw_unmask_irq(hdev, event_type);
5838 case GAUDI_EVENT_PCIE_CORE_SERR:
5839 case GAUDI_EVENT_PCIE_IF_SERR:
5840 case GAUDI_EVENT_PCIE_PHY_SERR:
5841 case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
5842 case GAUDI_EVENT_MME0_ACC_SERR:
5843 case GAUDI_EVENT_MME0_SBAB_SERR:
5844 case GAUDI_EVENT_MME1_ACC_SERR:
5845 case GAUDI_EVENT_MME1_SBAB_SERR:
5846 case GAUDI_EVENT_MME2_ACC_SERR:
5847 case GAUDI_EVENT_MME2_SBAB_SERR:
5848 case GAUDI_EVENT_MME3_ACC_SERR:
5849 case GAUDI_EVENT_MME3_SBAB_SERR:
5850 case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
5851 case GAUDI_EVENT_CPU_IF_ECC_SERR:
5852 case GAUDI_EVENT_PSOC_MEM_SERR:
5853 case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
5854 case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
5855 case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
5856 case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
5858 case GAUDI_EVENT_MMU_SERR:
5859 gaudi_print_irq_info(hdev, event_type, true);
5860 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
5861 hl_fw_unmask_irq(hdev, event_type);
5864 case GAUDI_EVENT_PCIE_DEC:
5865 case GAUDI_EVENT_MME0_WBC_RSP:
5866 case GAUDI_EVENT_MME0_SBAB0_RSP:
5867 case GAUDI_EVENT_MME1_WBC_RSP:
5868 case GAUDI_EVENT_MME1_SBAB0_RSP:
5869 case GAUDI_EVENT_MME2_WBC_RSP:
5870 case GAUDI_EVENT_MME2_SBAB0_RSP:
5871 case GAUDI_EVENT_MME3_WBC_RSP:
5872 case GAUDI_EVENT_MME3_SBAB0_RSP:
5873 case GAUDI_EVENT_CPU_AXI_SPLITTER:
5874 case GAUDI_EVENT_PSOC_AXI_DEC:
5875 case GAUDI_EVENT_PSOC_PRSTN_FALL:
5876 case GAUDI_EVENT_MMU_PAGE_FAULT:
5877 case GAUDI_EVENT_MMU_WR_PERM:
5878 case GAUDI_EVENT_RAZWI_OR_ADC:
5879 case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
5880 case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
5881 case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
5883 case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
5884 gaudi_print_irq_info(hdev, event_type, true);
5885 gaudi_handle_qman_err(hdev, event_type);
5886 hl_fw_unmask_irq(hdev, event_type);
5889 case GAUDI_EVENT_RAZWI_OR_ADC_SW:
5890 gaudi_print_irq_info(hdev, event_type, true);
5891 if (hdev->hard_reset_on_fw_events)
5892 hl_device_reset(hdev, true, false);
5895 case GAUDI_EVENT_TPC0_BMON_SPMU:
5896 case GAUDI_EVENT_TPC1_BMON_SPMU:
5897 case GAUDI_EVENT_TPC2_BMON_SPMU:
5898 case GAUDI_EVENT_TPC3_BMON_SPMU:
5899 case GAUDI_EVENT_TPC4_BMON_SPMU:
5900 case GAUDI_EVENT_TPC5_BMON_SPMU:
5901 case GAUDI_EVENT_TPC6_BMON_SPMU:
5902 case GAUDI_EVENT_TPC7_BMON_SPMU:
5903 case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
5904 gaudi_print_irq_info(hdev, event_type, false);
5905 hl_fw_unmask_irq(hdev, event_type);
5908 case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
5909 gaudi_print_clk_change_info(hdev, event_type);
5910 hl_fw_unmask_irq(hdev, event_type);
5913 case GAUDI_EVENT_PSOC_GPIO_U16_0:
5914 cause = le64_to_cpu(eq_entry->data[0]) & 0xFF;
5916 "Received high temp H/W interrupt %d (cause %d)\n",
5921 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
5927 static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
5930 struct gaudi_device *gaudi = hdev->asic_specific;
5933 *size = (u32) sizeof(gaudi->events_stat_aggregate);
5934 return gaudi->events_stat_aggregate;
5937 *size = (u32) sizeof(gaudi->events_stat);
5938 return gaudi->events_stat;
static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
					u32 flags)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 status, timeout_usec;
	int rc;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
		hdev->hard_reset_pending)
		return 0;

	if (hdev->pldm)
		timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/* L0 & L1 invalidation */
	WREG32(mmSTLB_INV_PS, 3);
	WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
	WREG32(mmSTLB_INV_PS, 2);

	rc = hl_poll_timeout(hdev, mmSTLB_INV_PS, status, !status, 1000,
				timeout_usec);

	WREG32(mmSTLB_INV_SET, 0);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc) {
		dev_err_ratelimited(hdev->dev,
					"MMU cache invalidation timeout\n");
		hl_device_reset(hdev, true, false);
	}

	return rc;
}

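/*
 * Range invalidation below uses a producer/consumer handshake: the driver
 * advances the 8-bit producer index in STLB_CACHE_INV and then polls
 * STLB_INV_CONSUMER_INDEX until the HW consumer catches up.
 */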
static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
				bool is_hard, u32 asid, u64 va, u64 size)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 status, timeout_usec;
	u32 inv_data;
	u32 pi;
	int rc;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
		hdev->hard_reset_pending)
		return 0;

	mutex_lock(&hdev->mmu_cache_lock);

	if (hdev->pldm)
		timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	/*
	 * TODO: currently invalidate entire L0 & L1 as in regular hard
	 * invalidation. Need to apply invalidation of specific cache
	 * lines with mask of ASID & VA & size.
	 * Note that L1 will be flushed entirely in any case.
	 */

	/* L0 & L1 invalidation */
	inv_data = RREG32(mmSTLB_CACHE_INV);
	/* PI is 8 bit */
	pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
	WREG32(mmSTLB_CACHE_INV,
		(inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);

	rc = hl_poll_timeout(hdev, mmSTLB_INV_CONSUMER_INDEX, status,
				status == pi, 1000, timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc) {
		dev_err_ratelimited(hdev->dev,
					"MMU cache invalidation timeout\n");
		hl_device_reset(hdev, true, false);
	}

	return rc;
}

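/*
 * Program the hop-0 page-table base address for a given ASID. The update is
 * handshaked through MMU_BUSY: the driver sets bit 31 to latch the new
 * values and polls until the HW clears it.
 */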
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
					u32 asid, u64 phys_addr)
{
	u32 status, timeout_usec;
	int rc;

	if (hdev->pldm)
		timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	WREG32(MMU_ASID, asid);
	WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
	WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
	WREG32(MMU_BUSY, 0x80000000);

	rc = hl_poll_timeout(hdev, MMU_BUSY, status,
				!(status & 0x80000000), 1000, timeout_usec);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout during MMU hop0 config of asid %d\n", asid);
		return rc;
	}

	return 0;
}

static int gaudi_send_heartbeat(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	return hl_fw_send_heartbeat(hdev);
}

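/*
 * Pull static card info (name, type) from the CPU-CP FW and derive the
 * default power limit from the card type (PCI vs. PMC form factor).
 */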
static int gaudi_cpucp_info_get(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	rc = hl_fw_cpucp_info_get(hdev);
	if (rc)
		return rc;

	if (!strlen(prop->cpucp_info.card_name))
		strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
				CARD_NAME_MAX_LEN);

	hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type);

	if (hdev->card_type == cpucp_card_type_pci)
		prop->max_power_default = MAX_POWER_DEFAULT_PCI;
	else if (hdev->card_type == cpucp_card_type_pmc)
		prop->max_power_default = MAX_POWER_DEFAULT_PMC;

	hdev->max_power = prop->max_power_default;

	return 0;
}

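/*
 * Idle check walks every DMA/TPC/MME engine and its QMAN. Clock gating is
 * disabled around the register reads (under clk_gate_mutex) so the QM/CGM
 * status registers reflect reality; an optional seq_file gets a per-engine
 * idle table for debugfs, and an optional mask gets a busy bit per engine.
 */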
static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
					struct seq_file *s)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
	const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
	u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
	bool is_idle = true, is_eng_idle, is_slave;
	u64 offset;
	int i, dma_id;

	mutex_lock(&gaudi->clk_gate_mutex);

	hdev->asic_funcs->disable_clock_gating(hdev);

	if (s)
		seq_puts(s,
			"\nDMA  is_idle  QM_GLBL_STS0  QM_CGM_STS  DMA_CORE_STS0\n"
			"---  -------  ------------  ----------  -------------\n");

	for (i = 0 ; i < DMA_NUMBER_OF_CHNLS ; i++) {
		dma_id = gaudi_dma_assignment[i];
		offset = dma_id * DMA_QMAN_OFFSET;

		qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + offset);
		qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + offset);
		dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + offset);
		is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
				IS_DMA_IDLE(dma_core_sts0);
		is_idle &= is_eng_idle;

		if (mask)
			*mask |= ((u64) !is_eng_idle) <<
					(GAUDI_ENGINE_ID_DMA_0 + dma_id);
		if (s)
			seq_printf(s, fmt, dma_id,
				is_eng_idle ? "Y" : "N", qm_glbl_sts0,
				qm_cgm_sts, dma_core_sts0);
	}

	if (s)
		seq_puts(s,
			"\nTPC  is_idle  QM_GLBL_STS0  QM_CGM_STS  CFG_STATUS\n"
			"---  -------  ------------  ----------  ----------\n");

	for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
		offset = i * TPC_QMAN_OFFSET;
		qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + offset);
		qm_cgm_sts = RREG32(mmTPC0_QM_CGM_STS + offset);
		tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + offset);
		is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
				IS_TPC_IDLE(tpc_cfg_sts);
		is_idle &= is_eng_idle;

		if (mask)
			*mask |= ((u64) !is_eng_idle) <<
					(GAUDI_ENGINE_ID_TPC_0 + i);
		if (s)
			seq_printf(s, fmt, i,
				is_eng_idle ? "Y" : "N",
				qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
	}

	if (s)
		seq_puts(s,
			"\nMME  is_idle  QM_GLBL_STS0  QM_CGM_STS  ARCH_STATUS\n"
			"---  -------  ------------  ----------  -----------\n");

	for (i = 0 ; i < MME_NUMBER_OF_ENGINES ; i++) {
		offset = i * MME_QMAN_OFFSET;
		mme_arch_sts = RREG32(mmMME0_CTRL_ARCH_STATUS + offset);
		is_eng_idle = IS_MME_IDLE(mme_arch_sts);

		/* MME 1 & 3 are slaves, no need to check their QMANs */
		is_slave = i % 2;
		if (!is_slave) {
			qm_glbl_sts0 = RREG32(mmMME0_QM_GLBL_STS0 + offset);
			qm_cgm_sts = RREG32(mmMME0_QM_CGM_STS + offset);
			is_eng_idle &= IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
		}

		is_idle &= is_eng_idle;

		if (mask)
			*mask |= ((u64) !is_eng_idle) <<
					(GAUDI_ENGINE_ID_MME_0 + i);
		if (s) {
			if (!is_slave)
				seq_printf(s, fmt, i,
					is_eng_idle ? "Y" : "N",
					qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
			else
				seq_printf(s, mme_slave_fmt, i,
					is_eng_idle ? "Y" : "N", "-",
					"-", mme_arch_sts);
		}
	}

	if (s)
		seq_puts(s, "\n");

	hdev->asic_funcs->set_clock_gating(hdev);

	mutex_unlock(&gaudi->clk_gate_mutex);

	return is_idle;
}

static void gaudi_hw_queues_lock(struct hl_device *hdev)
	__acquires(&gaudi->hw_queues_lock)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	spin_lock(&gaudi->hw_queues_lock);
}

static void gaudi_hw_queues_unlock(struct hl_device *hdev)
	__releases(&gaudi->hw_queues_lock)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	spin_unlock(&gaudi->hw_queues_lock);
}

static u32 gaudi_get_pci_id(struct hl_device *hdev)
{
	return hdev->pdev->device;
}

static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
				size_t max_size)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	return hl_fw_get_eeprom_data(hdev, data, max_size);
}

/*
 * this function should be used only during initialization and/or after reset,
 * when there are no active users.
 */
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
				u32 tpc_id)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u64 kernel_timeout;
	u32 status, offset;
	int rc;

	offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS);

	if (hdev->pldm)
		kernel_timeout = GAUDI_PLDM_TPC_KERNEL_WAIT_USEC;
	else
		kernel_timeout = HL_DEVICE_TIMEOUT_USEC;

	mutex_lock(&gaudi->clk_gate_mutex);

	hdev->asic_funcs->disable_clock_gating(hdev);

	WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW + offset,
			lower_32_bits(tpc_kernel));
	WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH + offset,
			upper_32_bits(tpc_kernel));

	WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW + offset,
			lower_32_bits(tpc_kernel));
	WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH + offset,
			upper_32_bits(tpc_kernel));
	/* set a valid LUT pointer, content is of no significance */
	WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO + offset,
			lower_32_bits(tpc_kernel));
	WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI + offset,
			upper_32_bits(tpc_kernel));

	WREG32(mmTPC0_CFG_QM_SYNC_OBJECT_ADDR + offset,
			lower_32_bits(CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0));

	WREG32(mmTPC0_CFG_TPC_CMD + offset,
			(1 << TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT |
			1 << TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT));
	/* wait a bit for the engine to start executing */
	usleep_range(1000, 1500);

	/* wait until engine has finished executing */
	rc = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_STATUS + offset,
		status,
		(status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
			TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
		1000,
		kernel_timeout);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d icache prefetch\n",
			tpc_id);
		hdev->asic_funcs->set_clock_gating(hdev);
		mutex_unlock(&gaudi->clk_gate_mutex);
		return -EIO;
	}

	WREG32(mmTPC0_CFG_TPC_EXECUTE + offset,
			1 << TPC0_CFG_TPC_EXECUTE_V_SHIFT);

	/* wait a bit for the engine to start executing */
	usleep_range(1000, 1500);

	/* wait until engine has finished executing */
	rc = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_STATUS + offset,
		status,
		(status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
			TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
		1000,
		kernel_timeout);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d vector pipe\n",
			tpc_id);
		hdev->asic_funcs->set_clock_gating(hdev);
		mutex_unlock(&gaudi->clk_gate_mutex);
		return -EIO;
	}

	rc = hl_poll_timeout(hdev, mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset,
				status, (status == 0), 1000, kernel_timeout);

	hdev->asic_funcs->set_clock_gating(hdev);
	mutex_unlock(&gaudi->clk_gate_mutex);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d kernel to execute\n",
			tpc_id);
		return -EIO;
	}

	return 0;
}

static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmHW_STATE);
}

static int gaudi_ctx_init(struct hl_ctx *ctx)
{
	return 0;
}

static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
	return gaudi_cq_assignment[cq_idx];
}

static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
{
	return sizeof(struct packet_msg_short) +
			sizeof(struct packet_msg_prot) * 2;
}

static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
{
	return sizeof(struct packet_msg_short) * 4 +
			sizeof(struct packet_fence) +
			sizeof(struct packet_msg_prot) * 2;
}

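/*
 * A signal CB is a single MSG_SHORT packet that increments the given sync
 * object (SOB): value 1 with the SOB "add" mode bit set, addressed through
 * the W_S sync-manager base.
 */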
static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
{
	struct hl_cb *cb = (struct hl_cb *) data;
	struct packet_msg_short *pkt;
	u32 value, ctl;

	pkt = cb->kernel_address;
	memset(pkt, 0, sizeof(*pkt));

	/* Inc by 1, Mode ADD */
	value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1);

	ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);

	pkt->value = cpu_to_le32(value);
	pkt->ctl = cpu_to_le32(ctl);
}

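/*
 * Helper that emits one MSG_SHORT packet targeting the W_S monitor address
 * space; used below to program a monitor's registers one 32-bit value at a
 * time. Returns the packet size so callers can accumulate an offset into
 * the CB.
 */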
static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
					u16 addr)
{
	u32 ctl, pkt_size = sizeof(*pkt);

	memset(pkt, 0, pkt_size);

	ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */

	pkt->value = cpu_to_le32(value);
	pkt->ctl = cpu_to_le32(ctl);

	return pkt_size;
}

static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
					u16 sob_val, u16 addr)
{
	u32 ctl, value, pkt_size = sizeof(*pkt);
	u8 mask = ~(1 << (sob_id & 0x7));

	memset(pkt, 0, pkt_size);

	value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8);
	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
			0); /* GREATER OR EQUAL */
	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);

	ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);

	pkt->value = cpu_to_le32(value);
	pkt->ctl = cpu_to_le32(ctl);

	return pkt_size;
}

static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
{
	u32 ctl, cfg, pkt_size = sizeof(*pkt);

	memset(pkt, 0, pkt_size);

	cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
	cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
	cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);

	ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);

	pkt->cfg = cpu_to_le32(cfg);
	pkt->ctl = cpu_to_le32(ctl);

	return pkt_size;
}

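/*
 * A wait CB programs one of the W_S sync-manager monitors to write a fence
 * credit when the SOB reaches sob_val, then blocks the queue's CP on a
 * FENCE packet until that credit arrives: four MSG_SHORTs set the monitor's
 * payload address (low/high), payload data and arm register, and the final
 * packet is the fence itself.
 */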
static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
			u16 sob_val, u16 mon_id, u32 q_idx)
{
	struct hl_cb *cb = (struct hl_cb *) data;
	void *buf = cb->kernel_address;
	u64 monitor_base, fence_addr = 0;
	u32 size = 0;
	u16 msg_addr_offset;

	switch (q_idx) {
	case GAUDI_QUEUE_ID_DMA_0_0:
		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0;
		break;
	case GAUDI_QUEUE_ID_DMA_0_1:
		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_1;
		break;
	case GAUDI_QUEUE_ID_DMA_0_2:
		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2;
		break;
	case GAUDI_QUEUE_ID_DMA_0_3:
		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3;
		break;
	case GAUDI_QUEUE_ID_DMA_1_0:
		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0;
		break;
	case GAUDI_QUEUE_ID_DMA_1_1:
		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1;
		break;
	case GAUDI_QUEUE_ID_DMA_1_2:
		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2;
		break;
	case GAUDI_QUEUE_ID_DMA_1_3:
		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3;
		break;
	case GAUDI_QUEUE_ID_DMA_5_0:
		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0;
		break;
	case GAUDI_QUEUE_ID_DMA_5_1:
		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1;
		break;
	case GAUDI_QUEUE_ID_DMA_5_2:
		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2;
		break;
	case GAUDI_QUEUE_ID_DMA_5_3:
		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3;
		break;
	default:
		/* queue index should be valid here */
		dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
				q_idx);
		return;
	}

	fence_addr += CFG_BASE;

	/*
	 * monitor_base should be the content of the base0 address registers,
	 * so it will be added to the msg short offsets
	 */
	monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;

	/* First monitor config packet: low address of the sync */
	msg_addr_offset =
		(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) -
				monitor_base;

	size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr,
					msg_addr_offset);

	/* Second monitor config packet: high address of the sync */
	msg_addr_offset =
		(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) -
				monitor_base;

	size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32),
					msg_addr_offset);

	/*
	 * Third monitor config packet: the payload, i.e. what to write when
	 * the sync triggers
	 */
	msg_addr_offset =
		(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) -
				monitor_base;

	size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset);

	/* Fourth monitor config packet: bind the monitor to a sync object */
	msg_addr_offset =
		(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
				monitor_base;

	size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val,
						msg_addr_offset);

	/* Fence packet */
	size += gaudi_add_fence_pkt(buf + size);
}

static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
	struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;

	dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
		hw_sob->sob_id);

	WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4,
		0);

	kref_init(&hw_sob->kref);
}

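/*
 * The FW presumably leaves a magic value in a scratchpad register that
 * survives reset (NON_RST_FLOPS_0) when running on a POWER9 host that
 * supports full 64-bit DMA addressing; otherwise the driver keeps the
 * default 48-bit DMA mask.
 */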
static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
{
	if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
							HL_POWER9_HOST_MAGIC) {
		hdev->power9_64bit_dma_enable = 1;
		hdev->dma_mask = 64;
	} else {
		hdev->power9_64bit_dma_enable = 0;
		hdev->dma_mask = 48;
	}
}

static u64 gaudi_get_device_time(struct hl_device *hdev)
{
	u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;

	return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}

static const struct hl_asic_funcs gaudi_funcs = {
	.early_init = gaudi_early_init,
	.early_fini = gaudi_early_fini,
	.late_init = gaudi_late_init,
	.late_fini = gaudi_late_fini,
	.sw_init = gaudi_sw_init,
	.sw_fini = gaudi_sw_fini,
	.hw_init = gaudi_hw_init,
	.hw_fini = gaudi_hw_fini,
	.halt_engines = gaudi_halt_engines,
	.suspend = gaudi_suspend,
	.resume = gaudi_resume,
	.cb_mmap = gaudi_cb_mmap,
	.ring_doorbell = gaudi_ring_doorbell,
	.pqe_write = gaudi_pqe_write,
	.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
	.asic_dma_free_coherent = gaudi_dma_free_coherent,
	.get_int_queue_base = gaudi_get_int_queue_base,
	.test_queues = gaudi_test_queues,
	.asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
	.asic_dma_pool_free = gaudi_dma_pool_free,
	.cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
	.cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
	.hl_dma_unmap_sg = gaudi_dma_unmap_sg,
	.cs_parser = gaudi_cs_parser,
	.asic_dma_map_sg = gaudi_dma_map_sg,
	.get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
	.add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
	.update_eq_ci = gaudi_update_eq_ci,
	.context_switch = gaudi_context_switch,
	.restore_phase_topology = gaudi_restore_phase_topology,
	.debugfs_read32 = gaudi_debugfs_read32,
	.debugfs_write32 = gaudi_debugfs_write32,
	.debugfs_read64 = gaudi_debugfs_read64,
	.debugfs_write64 = gaudi_debugfs_write64,
	.add_device_attr = gaudi_add_device_attr,
	.handle_eqe = gaudi_handle_eqe,
	.set_pll_profile = gaudi_set_pll_profile,
	.get_events_stat = gaudi_get_events_stat,
	.read_pte = gaudi_read_pte,
	.write_pte = gaudi_write_pte,
	.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
	.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
	.send_heartbeat = gaudi_send_heartbeat,
	.set_clock_gating = gaudi_set_clock_gating,
	.disable_clock_gating = gaudi_disable_clock_gating,
	.debug_coresight = gaudi_debug_coresight,
	.is_device_idle = gaudi_is_device_idle,
	.soft_reset_late_init = gaudi_soft_reset_late_init,
	.hw_queues_lock = gaudi_hw_queues_lock,
	.hw_queues_unlock = gaudi_hw_queues_unlock,
	.get_pci_id = gaudi_get_pci_id,
	.get_eeprom_data = gaudi_get_eeprom_data,
	.send_cpu_message = gaudi_send_cpu_message,
	.get_hw_state = gaudi_get_hw_state,
	.pci_bars_map = gaudi_pci_bars_map,
	.init_iatu = gaudi_init_iatu,
	.rreg = hl_rreg,
	.wreg = hl_wreg,
	.halt_coresight = gaudi_halt_coresight,
	.ctx_init = gaudi_ctx_init,
	.get_clk_rate = gaudi_get_clk_rate,
	.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
	.read_device_fw_version = gaudi_read_device_fw_version,
	.load_firmware_to_device = gaudi_load_firmware_to_device,
	.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
	.get_signal_cb_size = gaudi_get_signal_cb_size,
	.get_wait_cb_size = gaudi_get_wait_cb_size,
	.gen_signal_cb = gaudi_gen_signal_cb,
	.gen_wait_cb = gaudi_gen_wait_cb,
	.reset_sob = gaudi_reset_sob,
	.set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
	.get_device_time = gaudi_get_device_time
};

/**
 * gaudi_set_asic_funcs - set GAUDI function pointers
 *
 * @hdev: pointer to hl_device structure
 */
void gaudi_set_asic_funcs(struct hl_device *hdev)
{
	hdev->asic_funcs = &gaudi_funcs;
}