// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"
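
/*
 * Helper for the AFU enable/disable/reset paths below: clears the @clear
 * bits in AFU_Cntl_An, sets @command, then polls the register until the
 * bits selected by @mask read back as @result, giving up after CXL_TIMEOUT
 * seconds or when the link goes down. afu->enabled is set to @enabled on
 * success.
 */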
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
                       u64 result, u64 mask, bool enabled)
{
        u64 AFU_Cntl;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        spin_lock(&afu->afu_cntl_lock);
        pr_devel("AFU command starting: %llx\n", command);

        trace_cxl_afu_ctrl(afu, command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        while ((AFU_Cntl & mask) != result) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }

                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        afu->enabled = enabled;
                        rc = -EIO;
                        goto out;
                }

                pr_devel_ratelimited("AFU control... (0x%016llx)\n",
                                     AFU_Cntl | command);
                cpu_relax();
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        }

        if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
                /*
                 * Workaround for a bug in the XSL used in the Mellanox CX4
                 * that fails to clear the RA bit after an AFU reset,
                 * preventing subsequent AFU resets from working.
                 */
                cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
        }

        pr_devel("AFU command complete: %llx\n", command);
        afu->enabled = enabled;
out:
        trace_cxl_afu_ctrl_done(afu, command, rc);
        spin_unlock(&afu->afu_cntl_lock);

        return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
        pr_devel("AFU enable request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
                           CXL_AFU_Cntl_An_ES_Enabled,
                           CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
        pr_devel("AFU disable request\n");

        return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
                           CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
        int rc;
        u64 serr;

        pr_devel("AFU reset request\n");

        rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
                         CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
                         CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                         false);

        /*
         * Re-enable any masked interrupts when the AFU is not
         * activated to avoid side effects after attaching a process
         * in dedicated mode.
         */
        if (afu->current_mode == 0) {
                serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
                serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
                cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
        }

        return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Refusing to enable afu while link down!\n");
                return -EIO;
        }
        if (afu->enabled)
                return 0;
        return afu_enable(afu);
}
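
/*
 * Note on the purge loop below: a purge will not complete while faults are
 * still pending in PSL_DSISR_An, so the loop also services DSISR, either
 * terminating a pending translation or acking any other fault.
 */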
int cxl_psl_purge(struct cxl_afu *afu)
{
        u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        u64 dsisr, dar;
        u64 start, end;
        u64 trans_fault = 0x0ULL;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

        pr_devel("PSL purge request\n");

        if (cxl_is_power8())
                trans_fault = CXL_PSL_DSISR_TRANS;
        if (cxl_is_power9())
                trans_fault = CXL_PSL9_DSISR_An_TF;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
                rc = -EIO;
                goto out;
        }

        if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
                WARN(1, "psl_purge request while AFU not disabled!\n");
                cxl_afu_disable(afu);
        }

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                       PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
        start = local_clock();
        PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
                        == CXL_PSL_SCNTL_An_Ps_Pending) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        rc = -EIO;
                        goto out;
                }

                dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
                pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n",
                                     PSL_CNTL, dsisr);

                if (dsisr & trans_fault) {
                        dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
                        dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
                                   dsisr, dar);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
                } else if (dsisr) {
                        dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
                                   dsisr);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
                } else {
                        cpu_relax();
                }
                PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        }
        end = local_clock();
        pr_devel("PSL purged in %lld ns\n", end - start);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
        trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
        return rc;
}

static int spa_max_procs(int spa_size)
{
        /*
         * From the CAIA:
         *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
         * Most of that junk is really just an overly-complicated way of saying
         * the last 256 bytes are __aligned(128), so it's really:
         *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
         * and
         *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
         * so
         *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
         * Ignore the alignment (which is safe in this case as long as we are
         * careful with our rounding) and solve for n:
         */
        return ((spa_size / 8) - 96) / 17;
}
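
/*
 * Sanity check of the arithmetic above (not from the CAIA, just worked
 * through): a 4096 byte SPA gives n = ((4096/8) - 96) / 17 = 24, and indeed
 * ((24+4) * 128) + (24*8) + 256 = 4032 bytes fits in 4096, while n = 25
 * would need 4168 bytes and no longer fits.
 */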

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
        unsigned spa_size;

        /* Work out how many pages to allocate */
        afu->native->spa_order = -1;
        do {
                afu->native->spa_order++;
                spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

                if (spa_size > 0x100000) {
                        dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
                                 afu->native->spa_max_procs, afu->native->spa_size);
                        if (mode != CXL_MODE_DEDICATED)
                                afu->num_procs = afu->native->spa_max_procs;
                        break;
                }

                afu->native->spa_size = spa_size;
                afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
        } while (afu->native->spa_max_procs < afu->num_procs);

        if (!(afu->native->spa = (struct cxl_process_element *)
              __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
                pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
                return -ENOMEM;
        }
        pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
                 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

        return 0;
}
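
/*
 * Point the PSL at the SPA: CXL_PSL_SPAP_An takes the physical base
 * address, an encoded size and a valid bit. sw_command_status lives in the
 * 128 byte aligned block that follows the process element entries.
 */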
static void attach_spa(struct cxl_afu *afu)
{
        u64 spap;

        afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
                ((afu->native->spa_max_procs + 3) * 128));

        spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
        spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
        spap |= CXL_PSL_SPAP_V;
        pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
                 afu->native->spa, afu->native->spa_max_procs,
                 afu->native->sw_command_status, spap);
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

void cxl_release_spa(struct cxl_afu *afu)
{
        if (afu->native->spa) {
                free_pages((unsigned long) afu->native->spa,
                           afu->native->spa_order);
                afu->native->spa = NULL;
        }
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        u64 ierat;

        pr_devel("CXL adapter - invalidation of all ERAT entries\n");

        /* Invalidates all ERAT entries for Radix or HPT */
        ierat = CXL_XSL9_IERAT_IALL;
        if (radix_enabled())
                ierat |= CXL_XSL9_IERAT_INVR;
        cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

        while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev,
                                 "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("CXL adapter wide TLBIA & SLBIA\n");

        cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

        cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }

        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}
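
/*
 * The flush handshake below: set PSL_Control[Fr] to request the flush,
 * poll PSL_Control[Fs] until it reports completion, then clear the
 * request bit again.
 */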
int cxl_data_cache_flush(struct cxl *adapter)
{
        u64 reg;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        /*
         * Do a datacache flush only if datacache is available.
         * The PSL9D has no datacache, so the flush operation
         * would be useless there.
         */
        if (adapter->native->no_data_cache) {
                pr_devel("No PSL data cache. Ignoring cache flush req.\n");
                return 0;
        }

        pr_devel("Flushing data cache\n");
        reg = cxl_p1_read(adapter, CXL_PSL_Control);
        reg |= CXL_PSL_Control_Fr;
        cxl_p1_write(adapter, CXL_PSL_Control, reg);

        reg = cxl_p1_read(adapter, CXL_PSL_Control);
        while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
                        return -EBUSY;
                }

                if (!cxl_ops->link_ok(adapter, NULL)) {
                        dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
                        return -EIO;
                }
                cpu_relax();
                reg = cxl_p1_read(adapter, CXL_PSL_Control);
        }

        reg &= ~CXL_PSL_Control_Fr;
        cxl_p1_write(adapter, CXL_PSL_Control, reg);
        return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
        int rc;

        /* 1. Disable SSTP by writing 0 to SSTP1[V] */
        cxl_p2n_write(afu, CXL_SSTP1_An, 0);

        /* 2. Invalidate all SLB entries */
        if ((rc = cxl_afu_slbia(afu)))
                return rc;

        /* 3. Set SSTP0_An */
        cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

        /* 4. Set SSTP1_An */
        cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

        return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
        struct cxl *adapter = ctx->afu->adapter;
        u64 slbia;

        WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

        cxl_p1_write(adapter, CXL_PSL_LBISEL,
                     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
                     be32_to_cpu(ctx->elem->lpid));
        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

        while (1) {
                if (!cxl_ops->link_ok(adapter, NULL))
                        break;
                slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
                if (!(slbia & CXL_TLB_SLB_P))
                        break;
                cpu_relax();
        }
}
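
/*
 * Process element commands are a handshake through the SPA: the command
 * and PE index are written both to the software command/status word and
 * to PSL_LLCMD_An, then sw_command_status is polled until the PSL echoes
 * the command back with the matching state and link fields.
 */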
static int do_process_element_cmd(struct cxl_context *ctx,
                                  u64 cmd, u64 pe_state)
{
        u64 state;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_llcmd(ctx, cmd);

        WARN_ON(!ctx->afu->enabled);

        ctx->elem->software_state = cpu_to_be32(pe_state);
        smp_wmb();
        *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
        smp_mb();
        cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
        while (1) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
                        rc = -EIO;
                        goto out;
                }
                state = be64_to_cpup(ctx->afu->native->sw_command_status);
                if (state == ~0ULL) {
                        pr_err("cxl: Error adding process element to AFU\n");
                        rc = -1;
                        goto out;
                }
                if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
                    (cmd | (cmd >> 16) | ctx->pe))
                        break;
                /*
                 * The command won't finish in the PSL if there are
                 * outstanding DSIs. Hence we need to yield here in
                 * case there are outstanding DSIs that we need to
                 * service. Tuning possibility: we could wait for a
                 * while before scheduling.
                 */
                schedule();

        }
out:
        trace_cxl_llcmd_done(ctx, cmd, rc);
        return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
        if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
                ctx->pe_inserted = true;
        pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        /* fast path terminate if it's already invalid */
        if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
                return rc;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
        /* We could be asked to terminate when the hw is down. That
         * should always succeed: it's not running if the hw has gone
         * away and is being reset.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
                                            CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
        ctx->elem->software_state = 0;  /* Remove Valid bit */
        pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

        /* We could be asked to remove when the hw is down. Again, if
         * the hw is down, the PE is gone, so we succeed.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

        if (!rc)
                ctx->pe_inserted = false;
        if (cxl_is_power8())
                slb_invalid(ctx);
        pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);

        return rc;
}
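
/*
 * Master contexts (and AFUs without a per-process problem space) are given
 * the entire problem space; any other context gets the pp_size slice at
 * the offset corresponding to its PE index.
 */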
void cxl_assign_psn_space(struct cxl_context *ctx)
{
        if (!ctx->afu->pp_size || ctx->master) {
                ctx->psn_phys = ctx->afu->psn_phys;
                ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
                ctx->psn_phys = ctx->afu->psn_phys +
                        (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
                ctx->psn_size = ctx->afu->pp_size;
        }
}

static int activate_afu_directed(struct cxl_afu *afu)
{
        int rc;

        dev_info(&afu->dev, "Activating AFU directed mode\n");

        afu->num_procs = afu->max_procs_virtualised;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
        if (cxl_is_power8())
                cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DIRECTED;

        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
                goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
                goto err1;

        return 0;
err1:
        cxl_sysfs_afu_m_remove(afu);
err:
        cxl_chardev_afu_remove(afu);
        return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
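
/*
 * Build the State Register value describing the state the AFU operates
 * under for this context: endianness, problem vs privileged state,
 * relocation, and on POWER9 the translation mode (radix vs hash).
 */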
u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
        u64 sr = 0;

        set_endian(sr);
        if (master)
                sr |= CXL_PSL_SR_An_MP;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                sr |= CXL_PSL_SR_An_TC;

        if (kernel) {
                if (!real_mode)
                        sr |= CXL_PSL_SR_An_R;
                sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
        } else {
                sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_HV;
                else
                        sr &= ~(CXL_PSL_SR_An_HV);
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        sr |= CXL_PSL_SR_An_SF;
        }
        if (p9) {
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_XLAT_ror;
                else
                        sr |= CXL_PSL_SR_An_XLAT_hpt;
        }
        return sr;
}

static u64 calculate_sr(struct cxl_context *ctx)
{
        return cxl_calculate_sr(ctx->master, ctx->kernel, false,
                                cxl_is_power9());
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
        bool need_update = (ctx->status == STARTED);
        int r;

        if (need_update) {
                WARN_ON(terminate_process_element(ctx));
                WARN_ON(remove_process_element(ctx));
        }

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }

        /*
         * Theoretically we could use the update llcmd, instead of a
         * terminate/remove/add (or if an atomic update was required we could
         * do a suspend/update/resume), however it seems there might be issues
         * with the update llcmd on some cards (including those using an XSL on
         * an ASIC) so for now it's safest to go with the commands that are
         * known to work. In the future if we come across a situation where the
         * card may be performing transactions using the same PE while we are
         * doing this update we might need to revisit this.
         */
        if (need_update)
                WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int rc;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */

        if (ctx->kernel) {
                pid = 0;
        } else {
                if (ctx->mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        return -EINVAL;
                }
                pid = ctx->mm->context.id;
        }

        /* Assign a unique TIDR (thread id) for the current thread */
        if (!(ctx->tidr) && (ctx->assign_tidr)) {
                rc = set_thread_tidr(current);
                if (rc)
                        return -ENODEV;
                ctx->tidr = current->thread.tidr;
                pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
        }

        ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */

        cxl_prefault(ctx, wed);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        int result;

        /* fill the process element entry */
        result = process_element_entry_psl9(ctx, wed, amr);
        if (result)
                return result;

        update_ivtes_directed(ctx);

        /* first guy needs to enable */
        result = cxl_ops->afu_check_and_enable(ctx->afu);
        if (result)
                return result;

        return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int result;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */
        ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

        pid = current->pid;
        if (ctx->kernel)
                pid = 0;
        ctx->elem->common.tid = 0;
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */
        ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
        ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

        cxl_prefault(ctx, wed);

        ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
        ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        update_ivtes_directed(ctx);

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        /* first guy needs to enable */
        if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
                return result;

        return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating AFU directed mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        /*
         * The CAIA section 2.2.1 indicates that the procedure for starting and
         * stopping an AFU in AFU directed mode is AFU specific, which is not
         * ideal since this code is generic and with one exception has no
         * knowledge of the AFU. This is in contrast to the procedure for
         * disabling a dedicated process AFU, which is documented to just
         * require a reset. The architecture does indicate that both an AFU
         * reset and an AFU disable should result in the AFU being disabled and
         * we do both followed by a PSL purge for safety.
         *
         * Notably we used to have some issues with the disable sequence on PSL
         * cards, which is why we ended up using this heavy weight procedure in
         * the first place, however a bug was discovered that had rendered the
         * disable operation ineffective, so it is conceivable that was the
         * sole explanation for those difficulties. Careful regression testing
         * is recommended if anyone attempts to remove or reorder these
         * operations.
         *
         * The XSL on the Mellanox CX4 behaves a little differently from the
         * PSL based cards and will time out an AFU reset if the AFU is still
         * enabled. That card is special in that we do have a means to identify
         * it from this code, so in that case we skip the reset and just use a
         * disable/purge to avoid the timeout and corresponding noise in the
         * kernel log.
         */
        if (afu->adapter->native->sl_ops->needs_reset_before_disable)
                cxl_ops->afu_reset(afu);
        cxl_afu_disable(afu);
        cxl_psl_purge(afu);

        return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        /*
         * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
         * XSL and AFU are programmed to work with a single context.
         * The context information should be configured in the SPA area
         * index 0 (so PSL_SPAP must be configured before enabling the
         * AFU).
         */
        afu->num_procs = 1;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DEDICATED;

        return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

        cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
        cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
        cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

        cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
        cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
        cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

        afu->current_mode = CXL_MODE_DEDICATED;
        afu->num_procs = 1;

        return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
        int r;

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }
}
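
/*
 * On PSL8 the dedicated process IVTEs live in MMIO rather than in the SPA:
 * the four interrupt ranges are packed as 16 bit fields into a single
 * 64 bit offset register and a single 64 bit limit register.
 */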
void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
        struct cxl_afu *afu = ctx->afu;

        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
                      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
                      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
                      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
                      ((u64)ctx->irqs.offset[3] & 0xffff));
        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
                      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
                      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
                      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
                      ((u64)ctx->irqs.range[3] & 0xffff));
}

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        int result;

        /* fill the process element entry */
        result = process_element_entry_psl9(ctx, wed, amr);
        if (result)
                return result;

        if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
                afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

        ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
        /*
         * Ideally we should do a wmb() here to make sure the changes to the
         * PE are visible to the card before we call afu_enable.
         * On ppc64 though all mmios are preceded by a 'sync' instruction hence
         * we don't need one here.
         */

        result = cxl_ops->afu_reset(afu);
        if (result)
                return result;

        return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        u64 pid;
        int rc;

        pid = (u64)current->pid << 32;
        if (ctx->kernel)
                pid = 0;
        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

        cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

        if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
                return rc;

        cxl_prefault(ctx, wed);

        if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
                afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

        cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

        /* master only context for dedicated */
        cxl_assign_psn_space(ctx);

        if ((rc = cxl_ops->afu_reset(afu)))
                return rc;

        cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

        return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating dedicated process mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_chardev_afu_remove(afu);

        return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
        if (mode == CXL_MODE_DIRECTED)
                return deactivate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return deactivate_dedicated_process(afu);
        return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Device link is down, refusing to activate!\n");
                return -EIO;
        }

        if (mode == CXL_MODE_DIRECTED)
                return activate_afu_directed(afu);
        if ((mode == CXL_MODE_DEDICATED) &&
            (afu->adapter->native->sl_ops->activate_dedicated_process))
                return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

        return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
                                 u64 wed, u64 amr)
{
        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                WARN(1, "Device link is down, refusing to attach process!\n");
                return -EIO;
        }

        ctx->kernel = kernel;
        if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
            (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
                return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

        if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
            (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
                return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

        return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
        /*
         * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
         * stop the AFU in dedicated mode (we therefore do not make that
         * optional like we do in the afu directed path). It does not indicate
         * that we need to do an explicit disable (which should occur
         * implicitly as part of the reset) or purge, but we do these as well
         * to be on the safe side.
         *
         * Notably we used to have some issues with the disable sequence
         * (before the sequence was spelled out in the architecture) which is
         * why we were so heavy weight in the first place, however a bug was
         * discovered that had rendered the disable operation ineffective, so
         * it is conceivable that was the sole explanation for those
         * difficulties. Point is, we should be careful and do some regression
         * testing if we ever attempt to remove any part of this procedure.
         */
        cxl_ops->afu_reset(ctx->afu);
        cxl_afu_disable(ctx->afu);
        cxl_psl_purge(ctx->afu);
        return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return update_ivtes_directed(ctx);
        if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
            (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
                return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
        WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
        if (!ctx->pe_inserted)
                return 0;
        if (terminate_process_element(ctx))
                return -1;
        if (remove_process_element(ctx))
                return -1;

        return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
        trace_cxl_detach(ctx);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return detach_process_native_dedicated(ctx);

        return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
        /* If the adapter has gone away, we can't get any meaningful
         * information.
         */
        if (!cxl_ops->link_ok(afu->adapter, afu))
                return -EIO;

        info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
        if (cxl_is_power8())
                info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
        info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        info->proc_handle = 0;

        return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
        u64 fir1, serr;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);

        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
                cxl_afu_decode_psl_serr(ctx->afu, serr);
        }
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
                cxl_afu_decode_psl_serr(ctx->afu, serr);
        }
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
                                                 u64 dsisr, u64 errstat)
{
        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

        if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
                ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

        if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
                dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
                ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
        }

        return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
        if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
                return true;

        if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
                return true;

        return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
        if (cxl_is_translation_fault(afu, irq_info->dsisr))
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
        else
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

        return IRQ_HANDLED;
}
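
/*
 * One hardware interrupt is shared by all contexts on the slice: the PSL
 * reports which process element the fault belongs to in PSL_PEHandle_An,
 * which is used to look up the context to deliver the interrupt to.
 */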
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        struct cxl_irq_info irq_info;
        u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
        int ph, ret = IRQ_HANDLED, res;

        /* check if eeh kicked in while the interrupt was in flight */
        if (unlikely(phreg == ~0ULL)) {
                dev_warn(&afu->dev,
                         "Ignoring slice interrupt(%d) due to fenced card",
                         irq);
                return IRQ_HANDLED;
        }
        /* Mask the pe-handle from register value */
        ph = phreg & 0xffff;
        if ((res = native_get_irq_info(afu, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
                if (afu->adapter->native->sl_ops->fail_irq)
                        return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
                return ret;
        }

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                if (afu->adapter->native->sl_ops->handle_interrupt)
                        ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
                " %016llx\n(Possible AFU HW issue - was a term/remove acked"
                " with outstanding transactions?)\n", ph, irq_info.dsisr,
                irq_info.dar);
        if (afu->adapter->native->sl_ops->fail_irq)
                ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
        return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
        u64 dsisr;
        int timeout = 1000;
        int ph;

        /*
         * Wait until no further interrupts are presented by the PSL
         * for this context.
         */
        while (timeout--) {
                ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
                if (ph != ctx->pe)
                        return;
                dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
                if (cxl_is_power8() &&
                    ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
                        return;
                if (cxl_is_power9() &&
                    ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
                        return;
                /*
                 * We are waiting for the workqueue to process our
                 * irq, so need to let that run here.
                 */
                msleep(1);
        }

        dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
                 " DSISR %016llx!\n", ph, dsisr);
        return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 errstat, serr, afu_error, dsisr;
        u64 fir_slice, afu_debug, irq_mask;

        /*
         * slice err interrupt is only used with full PSL (no XSL)
         */
        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_afu_decode_psl_serr(afu, serr);

        if (cxl_is_power8()) {
                fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
                afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
                dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
                dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
        }
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

        /* mask off the IRQ so it won't retrigger until the AFU is reset */
        irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
        serr |= irq_mask;
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
        dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

        return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
        u64 fir1;

        fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
        dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}

void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
        u64 fir1, fir2;

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
        dev_crit(&adapter->dev,
                 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
                 fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

        if (adapter->native->sl_ops->debugfs_stop_trace) {
                dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
                adapter->native->sl_ops->debugfs_stop_trace(adapter);
        }

        if (adapter->native->sl_ops->err_irq_dump_registers)
                adapter->native->sl_ops->err_irq_dump_registers(adapter);

        return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&adapter->dev));
        if (!adapter->irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
                                       &adapter->native->err_hwirq,
                                       &adapter->native->err_virq,
                                       adapter->irq_name))) {
                kfree(adapter->irq_name);
                adapter->irq_name = NULL;
                return rc;
        }

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

        return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
        if (adapter->native->err_virq == 0 ||
            adapter->native->err_virq !=
            irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
        adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return rc;
        }

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        if (cxl_is_power8())
                serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        if (cxl_is_power9()) {
                /*
                 * By default, all errors are masked. So don't set all masks.
                 * Slice errors will be transferred.
                 */
                serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
        }
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
        if (afu->serr_virq == 0 ||
            afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;

        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
        afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
        int rc;

        afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
                                      dev_name(&afu->dev));
        if (!afu->psl_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
                                       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
                                       afu->psl_irq_name))) {
                kfree(afu->psl_irq_name);
                afu->psl_irq_name = NULL;
        }
        return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
        if (afu->native->psl_virq == 0 ||
            afu->native->psl_virq !=
            irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;

        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
        afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
        u64 dsisr;

        pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

        /* Clear PSL_DSISR[PE] */
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

        /* Write 1s to clear error status bits */
        cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
        trace_cxl_psl_irq_ack(ctx, tfc);
        if (tfc)
                cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
        if (psl_reset_mask)
                recover_psl_err(ctx->afu, psl_reset_mask);

        return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
        /* An MMIO read of all 1s indicates the slice is fenced (e.g. EEH) */
        return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
                                      enum cxl_attrs type)
{
        return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
                       (cr * afu->crs_len) + off);
        return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                       (cr * afu->crs_len) + off);
        return 0;
}
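
/*
 * The 16 and 8 bit config record accessors below are synthesised from
 * aligned 32 bit MMIO accesses: reads shift and mask the containing word,
 * writes do an aligned 32 bit read-modify-write.
 */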
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xffff;
        return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xff;
        return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                 (cr * afu->crs_len) + off, in);
        return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        WARN_ON(shift == 24);
        mask = 0xffff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        mask = 0xff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
        .module = THIS_MODULE,
        .adapter_reset = cxl_pci_reset,
        .alloc_one_irq = cxl_pci_alloc_one_irq,
        .release_one_irq = cxl_pci_release_one_irq,
        .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
        .release_irq_ranges = cxl_pci_release_irq_ranges,
        .setup_irq = cxl_pci_setup_irq,
        .handle_psl_slice_error = native_handle_psl_slice_error,
        .psl_interrupt = NULL,
        .ack_irq = native_ack_irq,
        .irq_wait = native_irq_wait,
        .attach_process = native_attach_process,
        .detach_process = native_detach_process,
        .update_ivtes = native_update_ivtes,
        .support_attributes = native_support_attributes,
        .link_ok = cxl_adapter_link_ok,
        .release_afu = cxl_pci_release_afu,
        .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
        .afu_check_and_enable = native_afu_check_and_enable,
        .afu_activate_mode = native_afu_activate_mode,
        .afu_deactivate_mode = native_afu_deactivate_mode,
        .afu_reset = native_afu_reset,
        .afu_cr_read8 = native_afu_cr_read8,
        .afu_cr_read16 = native_afu_cr_read16,
        .afu_cr_read32 = native_afu_cr_read32,
        .afu_cr_read64 = native_afu_cr_read64,
        .afu_cr_write8 = native_afu_cr_write8,
        .afu_cr_write16 = native_afu_cr_write16,
        .afu_cr_write32 = native_afu_cr_write32,
        .read_adapter_vpd = cxl_pci_read_adapter_vpd,
};