1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright 2017 IBM Corp.
3 #include <asm/pnv-ocxl.h>
5 #include <misc/ocxl-config.h>
8 #define PNV_OCXL_TL_P9_RECV_CAP 0x000000000000000Full
9 #define PNV_OCXL_ACTAG_MAX 64
10 /* PASIDs are 20-bit, but on P9, NPU can only handle 15 bits */
11 #define PNV_OCXL_PASID_BITS 15
12 #define PNV_OCXL_PASID_MAX ((1 << PNV_OCXL_PASID_BITS) - 1)
14 #define AFU_PRESENT (1 << 31)
15 #define AFU_INDEX_MASK 0x3F000000
16 #define AFU_INDEX_SHIFT 24
17 #define ACTAG_MASK 0xFFF
26 struct list_head list;
30 u16 fn_desired_actags[8];
31 struct actag_range fn_actags[8];
34 static struct list_head links_list = LIST_HEAD_INIT(links_list);
35 static DEFINE_MUTEX(links_list_lock);
39 * opencapi actags handling:
41 * When sending commands, the opencapi device references the memory
42 * context it's targeting with an 'actag', which is really an alias
43 * for a (BDF, pasid) combination. When it receives a command, the NPU
44 * must do a lookup of the actag to identify the memory context. The
45 * hardware supports a finite number of actags per link (64 for
48 * The device can carry multiple functions, and each function can have
49 * multiple AFUs. Each AFU advertises in its config space the number
50 * of desired actags. The host must configure in the config space of
51 * the AFU how many actags the AFU is really allowed to use (which can
52 * be less than what the AFU desires).
54 * When a PCI function is probed by the driver, it has no visibility
55 * about the other PCI functions and how many actags they'd like,
56 * which makes it impossible to distribute actags fairly among AFUs.
58 * Unfortunately, the only way to know how many actags a function
59 * desires is by looking at the data for each AFU in the config space
60 * and adding them up. Similarly, the only way to know how many actags
61 * all the functions of the physical device desire is by adding the
62 * previously computed function counts. Then we can match that against
63 * what the hardware supports.
65 * To get a comprehensive view, we use a 'pci fixup': at the end of
66 * PCI enumeration, each function counts how many actags its AFUs
67 * desire and we save it in a 'npu_link' structure, shared between all
68 * the PCI functions of the same device. Therefore, when the first
69 * function is probed by the driver, we can get an idea of the total
70 * count of desired actags for the device, and assign the actags to
71 * the AFUs, by pro-rating if needed.
/*
 * Scan the PCI extended capability list, starting at offset 'pos', for an
 * IBM Designated Vendor-Specific Extended Capability (DVSEC) whose DVSEC ID
 * equals 'dvsec_id'.
 *
 * NOTE(review): local declarations and the return paths are outside this
 * view; presumably the matching capability offset is returned, with 0 (or a
 * negative value) meaning "not found" — confirm against the full file.
 */
74 static int find_dvsec_from_pos(struct pci_dev *dev, int dvsec_id, int pos)
79 while ((vsec = pci_find_next_ext_capability(dev, vsec,
80 OCXL_EXT_CAP_ID_DVSEC))) {
/* Read the vendor and DVSEC ID fields of this capability */
81 pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
83 pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);
/* Only an IBM vendor DVSEC carrying the requested ID is a match */
84 if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id)
/*
 * Find the OpenCAPI AFU-control DVSEC belonging to AFU index 'afu_idx':
 * iterate over all AFU_CTRL DVSECs of the function and compare each one's
 * AFU index byte against the one requested.
 * (Loop body and return paths are partially outside this view.)
 */
90 static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
95 while ((vsec = find_dvsec_from_pos(dev, OCXL_DVSEC_AFU_CTRL_ID,
97 pci_read_config_byte(dev, vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX,
/*
 * Read the highest implemented AFU index of the function from the OpenCAPI
 * function DVSEC. *afu_idx is only written when the AFU_PRESENT bit of the
 * function offset/index dword is set; the caller pre-initializes *afu_idx
 * to -1 to mean "no AFU present".
 */
105 static int get_max_afu_index(struct pci_dev *dev, int *afu_idx)
110 pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM,
115 pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val);
116 if (val & AFU_PRESENT)
/* Max AFU index lives in bits 29:24 of the dword */
117 *afu_idx = (val & AFU_INDEX_MASK) >> AFU_INDEX_SHIFT;
/*
 * Read how many actags the AFU with index 'afu_idx' desires, from the
 * ACTAG_SUP register of its AFU-control DVSEC. The count is the low 12
 * bits (ACTAG_MASK) of that register.
 */
123 static int get_actag_count(struct pci_dev *dev, int afu_idx, int *actag)
128 pos = find_dvsec_afu_ctrl(dev, afu_idx);
132 pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP,
134 *actag = actag_sup & ACTAG_MASK;
/*
 * Look up the npu_link for a device, keyed on (domain, bus, slot) so that
 * all PCI functions of one device share the same link entry. If no entry
 * exists yet, allocate, initialize and register one.
 *
 * Every visible caller in this file holds links_list_lock around the call,
 * which protects links_list.
 */
138 static struct npu_link *find_link(struct pci_dev *dev)
140 struct npu_link *link;
142 list_for_each_entry(link, &links_list, list) {
143 /* The functions of a device all share the same link */
144 if (link->domain == pci_domain_nr(dev->bus) &&
145 link->bus == dev->bus->number &&
146 link->dev == PCI_SLOT(dev->devfn)) {
151 /* link doesn't exist yet. Allocate one */
152 link = kzalloc(sizeof(struct npu_link), GFP_KERNEL);
155 link->domain = pci_domain_nr(dev->bus);
156 link->bus = dev->bus->number;
157 link->dev = PCI_SLOT(dev->devfn);
158 list_add(&link->list, &links_list);
/*
 * PCI fixup run during enumeration for every function (registered below
 * with PCI_ANY_ID/PCI_ANY_ID, but bailing out early unless this is a
 * powernv OpenCAPI PHB): add up the actags desired by all AFUs of the
 * function and record the total in the shared npu_link, so actags can
 * later be distributed fairly between the device's functions.
 */
162 static void pnv_ocxl_fixup_actag(struct pci_dev *dev)
164 struct pci_controller *hose = pci_bus_to_host(dev->bus);
165 struct pnv_phb *phb = hose->private_data;
166 struct npu_link *link;
167 int rc, afu_idx = -1, i, actag;
/* Only relevant on powernv, and only for OpenCAPI PHBs */
169 if (!machine_is(powernv))
172 if (phb->type != PNV_PHB_NPU_OCAPI)
175 mutex_lock(&links_list_lock);
177 link = find_link(dev);
179 dev_warn(&dev->dev, "couldn't update actag information\n");
180 mutex_unlock(&links_list_lock);
185 * Check how many actags are desired for the AFUs under that
186 * function and add it to the count for the link
188 rc = get_max_afu_index(dev, &afu_idx);
190 /* Most likely an invalid config space */
191 dev_dbg(&dev->dev, "couldn't find AFU information\n");
/* Sum the desired actags over all AFU indexes of this function */
195 link->fn_desired_actags[PCI_FUNC(dev->devfn)] = 0;
196 for (i = 0; i <= afu_idx; i++) {
198 * AFU index 'holes' are allowed. So don't fail if we
199 * can't read the actag info for an index
201 rc = get_actag_count(dev, i, &actag);
204 link->fn_desired_actags[PCI_FUNC(dev->devfn)] += actag;
206 dev_dbg(&dev->dev, "total actags for function: %d\n",
207 link->fn_desired_actags[PCI_FUNC(dev->devfn)]);
209 mutex_unlock(&links_list_lock);
211 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_ocxl_fixup_actag);
/*
 * Compute how many actags a function is actually granted: the full
 * 'desired' count when the link-wide total fits within the 64 actags the
 * hardware supports, otherwise a pro-rated share
 * (PNV_OCXL_ACTAG_MAX * desired / total).
 */
213 static u16 assign_fn_actags(u16 desired, u16 total)
217 if (total <= PNV_OCXL_ACTAG_MAX)
220 count = PNV_OCXL_ACTAG_MAX * desired / total;
/*
 * Distribute the link's actags between its (up to 8) PCI functions,
 * proportionally to each function's desired count, recording the
 * resulting contiguous [start, count] range per function and marking the
 * assignment as done.
 *
 * NOTE(review): call sites are outside this view — presumably called with
 * links_list_lock held (the lazy-assignment path in pnv_ocxl_get_actag);
 * confirm against the full file.
 */
225 static void assign_actags(struct npu_link *link)
227 u16 actag_count, range_start = 0, total_desired = 0;
/* First pass: link-wide total of desired actags */
230 for (i = 0; i < 8; i++)
231 total_desired += link->fn_desired_actags[i];
/* Second pass: carve out a pro-rated range for each function */
233 for (i = 0; i < 8; i++) {
234 if (link->fn_desired_actags[i]) {
235 actag_count = assign_fn_actags(
236 link->fn_desired_actags[i],
238 link->fn_actags[i].start = range_start;
239 link->fn_actags[i].count = actag_count;
240 range_start += actag_count;
241 WARN_ON(range_start >= PNV_OCXL_ACTAG_MAX);
243 pr_debug("link %x:%x:%x fct %d actags: start=%d count=%d (desired=%d)\n",
244 link->domain, link->bus, link->dev, i,
245 link->fn_actags[i].start, link->fn_actags[i].count,
246 link->fn_desired_actags[i]);
248 link->assignment_done = true;
/*
 * Return the actag range assigned to the calling device's function:
 * *base is the first actag of the range, *enabled how many the function
 * may use, *supported how many its AFUs desired. Assignment over the
 * whole link is done lazily on first use (the !assignment_done branch —
 * its body is outside this view, presumably calling assign_actags()).
 */
251 int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled,
254 struct npu_link *link;
256 mutex_lock(&links_list_lock);
258 link = find_link(dev);
260 dev_err(&dev->dev, "actag information not found\n");
261 mutex_unlock(&links_list_lock);
265 * On p9, we only have 64 actags per link, so they must be
266 * shared by all the functions of the same adapter. We counted
267 * the desired actag counts during PCI enumeration, so that we
268 * can allocate a pro-rated number of actags to each function.
270 if (!link->assignment_done)
273 *base = link->fn_actags[PCI_FUNC(dev->devfn)].start;
274 *enabled = link->fn_actags[PCI_FUNC(dev->devfn)].count;
275 *supported = link->fn_desired_actags[PCI_FUNC(dev->devfn)];
277 mutex_unlock(&links_list_lock);
280 EXPORT_SYMBOL_GPL(pnv_ocxl_get_actag);
/*
 * Report how many PASIDs the function may use. Only the single
 * AFU-carrying function (non-zero desired actags matching this devfn) is
 * granted the full PNV_OCXL_PASID_MAX (2^15 - 1, see top of file: the P9
 * NPU only handles 15 PASID bits).
 */
282 int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
284 struct npu_link *link;
288 * The number of PASIDs (process address space ID) which can
289 * be used by a function depends on how many functions exist
290 * on the device. The NPU needs to be configured to know how
291 * many bits are available to PASIDs and how many are to be
292 * used by the function BDF identifier.
294 * We only support one AFU-carrying function for now.
296 mutex_lock(&links_list_lock);
298 link = find_link(dev);
300 dev_err(&dev->dev, "actag information not found\n");
301 mutex_unlock(&links_list_lock);
/* Grant PASIDs only to the function that actually carries AFUs */
305 for (i = 0; i < 8; i++)
306 if (link->fn_desired_actags[i] && (i == PCI_FUNC(dev->devfn))) {
307 *count = PNV_OCXL_PASID_MAX;
312 mutex_unlock(&links_list_lock);
313 dev_dbg(&dev->dev, "%d PASIDs available for function\n",
317 EXPORT_SYMBOL_GPL(pnv_ocxl_get_pasid_count);
/*
 * Set the 4-bit receive rate for TL template 'templ' in the rate buffer.
 * Rates are packed two templates per byte, indexed from the highest
 * template number down (hence the PNV_OCXL_TL_MAX_TEMPLATE arithmetic).
 */
319 static void set_templ_rate(unsigned int templ, unsigned int rate, char *buf)
323 WARN_ON(templ > PNV_OCXL_TL_MAX_TEMPLATE);
324 idx = (PNV_OCXL_TL_MAX_TEMPLATE - templ) / 2;
/* Even slots land in the high nibble, odd slots in the low nibble */
325 shift = 4 * (1 - ((PNV_OCXL_TL_MAX_TEMPLATE - templ) % 2));
326 buf[idx] |= rate << shift;
/*
 * Report the host's Transaction Layer capabilities: supported template
 * bitmap in *cap and the per-template receive rates in rate_buf. The
 * values are hard-coded for the P9 NPU (see comment below).
 */
329 int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap,
330 char *rate_buf, int rate_buf_size)
/* Callers must pass a buffer of exactly the expected size */
332 if (rate_buf_size != PNV_OCXL_TL_RATE_BUF_SIZE)
335 * The TL capabilities are a characteristic of the NPU, so
336 * we go with hard-coded values.
338 * The receiving rate of each template is encoded on 4 bits.
341 * - templates 0 -> 3 are supported
342 * - templates 0, 1 and 3 have a 0 receiving rate
343 * - template 2 has receiving rate of 1 (extra cycle)
345 memset(rate_buf, 0, rate_buf_size);
346 set_templ_rate(2, 1, rate_buf);
347 *cap = PNV_OCXL_TL_P9_RECV_CAP;
/*
 * Push the negotiated Transaction Layer configuration (capability bitmap
 * and rate buffer, passed by physical address) down to the NPU through
 * the opal_npu_tl_set() firmware call.
 */
352 int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap,
353 uint64_t rate_buf_phys, int rate_buf_size)
355 struct pci_controller *hose = pci_bus_to_host(dev->bus);
356 struct pnv_phb *phb = hose->private_data;
359 if (rate_buf_size != PNV_OCXL_TL_RATE_BUF_SIZE)
362 rc = opal_npu_tl_set(phb->opal_id, dev->devfn, cap,
363 rate_buf_phys, rate_buf_size);
365 dev_err(&dev->dev, "Can't configure host TL: %d\n", rc);
/*
 * Fetch the hardware interrupt number used by the XSL for translation
 * faults, from the "ibm,opal-xsl-irq" device tree property of the device
 * node.
 */
372 int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq)
376 rc = of_property_read_u32(dev->dev.of_node, "ibm,opal-xsl-irq", hwirq);
379 "Can't get translation interrupt for device\n");
/*
 * Undo pnv_ocxl_map_xsl_regs(): release the mappings of the four XSL
 * fault registers. (Body is outside this view — presumably iounmap() of
 * each pointer; confirm against the full file.)
 */
386 void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar,
387 void __iomem *tfc, void __iomem *pe_handle)
394 EXPORT_SYMBOL_GPL(pnv_ocxl_unmap_xsl_regs);
/*
 * Map the four XSL translation-fault registers (DSISR, DAR, TFC,
 * PE_HANDLE) whose MMIO addresses opal publishes in the
 * "ibm,opal-xsl-mmio" device tree property, in that order. On failure,
 * already-created mappings are torn down (the j loop). Callers release
 * the mappings with pnv_ocxl_unmap_xsl_regs().
 */
396 int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
397 void __iomem **dar, void __iomem **tfc,
398 void __iomem **pe_handle)
402 void __iomem *regs[4];
405 * opal stores the mmio addresses of the DSISR, DAR, TFC and
406 * PE_HANDLE registers in a device tree property, in that
409 for (i = 0; i < 4; i++) {
410 rc = of_property_read_u64_index(dev->dev.of_node,
/* Fixed mis-encoded '&reg': the OF helper needs a u64 out-pointer */
411 "ibm,opal-xsl-mmio", i, &reg);
/* Each register is 8 bytes wide */
414 regs[i] = ioremap(reg, 8);
421 dev_err(&dev->dev, "Can't map translation mmio registers\n");
/* Unwind the mappings created before the failure */
422 for (j = i - 1; j >= 0; j--)
428 *pe_handle = regs[3];
432 EXPORT_SYMBOL_GPL(pnv_ocxl_map_xsl_regs);
/*
 * Set up the Shared Process Area for the link through the
 * opal_npu_spa_setup() firmware call, passing the physical address of
 * the caller-allocated SPA memory. On success, an allocated spa_data
 * handle (recording the phb opal id, and presumably the bdfn) is
 * returned through *platform_data; ownership transfers to the caller,
 * who releases it via pnv_ocxl_spa_release().
 */
439 int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask,
440 void **platform_data)
442 struct pci_controller *hose = pci_bus_to_host(dev->bus);
443 struct pnv_phb *phb = hose->private_data;
444 struct spa_data *data;
448 data = kzalloc(sizeof(*data), GFP_KERNEL);
452 bdfn = pci_dev_id(dev);
453 rc = opal_npu_spa_setup(phb->opal_id, bdfn, virt_to_phys(spa_mem),
456 dev_err(&dev->dev, "Can't setup Shared Process Area: %d\n", rc);
460 data->phb_opal_id = phb->opal_id;
462 *platform_data = (void *) data;
465 EXPORT_SYMBOL_GPL(pnv_ocxl_spa_setup);
/*
 * Tear down the Shared Process Area set up by pnv_ocxl_spa_setup(): the
 * same firmware call with a zero SPA address clears it. (The freeing of
 * 'data' is outside this view — presumably kfree() follows; confirm
 * against the full file.)
 */
467 void pnv_ocxl_spa_release(void *platform_data)
469 struct spa_data *data = (struct spa_data *) platform_data;
472 rc = opal_npu_spa_setup(data->phb_opal_id, data->bdfn, 0, 0);
476 EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release);
/*
 * Ask firmware to evict the given PE handle from the NPU's SPA cache,
 * e.g. after the corresponding process entry has been invalidated.
 */
478 int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle)
480 struct spa_data *data = (struct spa_data *) platform_data;
482 return opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle);
484 EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache);
/*
 * Map the device to a logical partition for address translation
 * shootdowns: look up the MMIO ATSD register block in the "ibm,mmio-atsd"
 * property of the PHB node, assign a register set to the LPAR through
 * opal_npu_map_lpar(), and return an ioremap'ed pointer to the 24-byte
 * ATSD register area through *arva. Released with pnv_ocxl_unmap_lpar().
 */
486 int pnv_ocxl_map_lpar(struct pci_dev *dev, uint64_t lparid,
487 uint64_t lpcr, void __iomem **arva)
489 struct pci_controller *hose = pci_bus_to_host(dev->bus);
490 struct pnv_phb *phb = hose->private_data;
494 /* ATSD physical address.
495 * ATSD LAUNCH register: write access initiates a shoot down to
496 * initiate the TLB Invalidate command.
498 rc = of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
501 dev_info(&dev->dev, "No available ATSD found\n");
505 /* Assign a register set to a Logical Partition and MMIO ATSD
506 * LPARID register to the required value.
508 rc = opal_npu_map_lpar(phb->opal_id, pci_dev_id(dev),
511 dev_err(&dev->dev, "Error mapping device to LPAR: %d\n", rc);
/* 24 bytes covers the LAUNCH, AVA and STAT registers used below */
515 *arva = ioremap(mmio_atsd, 24);
517 dev_warn(&dev->dev, "ioremap failed - mmio_atsd: %#llx\n", mmio_atsd);
523 EXPORT_SYMBOL_GPL(pnv_ocxl_map_lpar);
/*
 * Undo pnv_ocxl_map_lpar(). (Body is outside this view — presumably
 * iounmap(arva); confirm against the full file.)
 */
525 void pnv_ocxl_unmap_lpar(void __iomem *arva)
529 EXPORT_SYMBOL_GPL(pnv_ocxl_unmap_lpar);
/*
 * Launch a TLB invalidation through the MMIO ATSD registers mapped by
 * pnv_ocxl_map_lpar(): program the abbreviated virtual address, write
 * the LAUNCH register to start the shootdown, then poll the STAT
 * register until completion or timeout (PNV_OCXL_ATSD_TIMEOUT seconds).
 * NOTE(review): the parameter list and several branches (e.g. the
 * address-vs-full-invalidate split implied by the two IS values) are
 * partially outside this view — confirm details against the full file.
 */
531 void pnv_ocxl_tlb_invalidate(void __iomem *arva,
534 unsigned long page_size)
536 unsigned long timeout = jiffies + (HZ * PNV_OCXL_ATSD_TIMEOUT);
545 /* load Abbreviated Virtual Address register with
546 * the necessary value
548 val |= FIELD_PREP(PNV_OCXL_ATSD_AVA_AVA, addr >> (63-51));
549 out_be64(arva + PNV_OCXL_ATSD_AVA, val);
552 /* Write access initiates a shoot down to initiate the
553 * TLB Invalidate command
555 val = PNV_OCXL_ATSD_LNCH_R;
556 val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_RIC, 0b10);
558 val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 0b00);
560 val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 0b01);
561 val |= PNV_OCXL_ATSD_LNCH_OCAPI_SINGLETON;
563 val |= PNV_OCXL_ATSD_LNCH_PRS;
564 /* Actual Page Size to be invalidated
/* 4K, 2M and 1G pages map to distinct AP encodings */
571 if (page_size == 0x1000)
573 if (page_size == 0x200000)
575 if (page_size == 0x40000000)
577 val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_AP, size);
578 val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_PID, pid);
579 out_be64(arva + PNV_OCXL_ATSD_LNCH, val);
581 /* Poll the ATSD status register to determine when the
582 * TLB Invalidate has been completed.
584 val = in_be64(arva + PNV_OCXL_ATSD_STAT);
588 if (time_after_eq(jiffies, timeout)) {
589 pr_err("%s - Timeout while reading XTS MMIO ATSD status register (val=%#llx, pidr=0x%lx)\n",
594 val = in_be64(arva + PNV_OCXL_ATSD_STAT);