// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;
/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
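/*
 * Worked example (derived from the definitions above): with
 * lpi_id_bits == 16, PROPBASE covers 2^16 one-byte entries, so
 * LPI_PROPBASE_SZ = ALIGN(64KB, 64KB) = 64KB; PENDBASE needs
 * 2^16 / 8 = 8KB of pending bits, rounded up to a 64kB-aligned 64KB.
 */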
#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};
/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_node;
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	void __iomem		*sgir_base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};
#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256
/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
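/*
 * Following from the macro above: a plain GICv4.0 ITS gets the
 * default 16 bits (up to 65536 vPEs), while a GICv4.1 ITS with
 * GICD_TYPER2.VIL set uses GICD_TYPER2.VID + 1 bits instead.
 */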
/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};
/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};
static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;
struct cpu_lpi_count {
	atomic_t	managed;
	atomic_t	unmanaged;
};

static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
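/*
 * Note on the SZ_128K offset above: each redistributor exposes a pair
 * of 64KB frames (RD_base and SGI_base); on GICv4 the VLPI frame sits
 * right after them, hence RD_base + 128KB.
 */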
/*
 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
 * always have vSGIs mapped.
 */
static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
{
	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (require_its_list_vmovp(vm, its))
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	return d->hwirq - its_dev->event_map.lpi_base;
}
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}
static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}
static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}
static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}
static struct irq_chip its_vpe_irq_chip;
static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
	struct its_vpe *vpe = NULL;
	int cpu;

	if (d->chip == &its_vpe_irq_chip) {
		vpe = irq_data_get_irq_chip_data(d);
	} else {
		struct its_vlpi_map *map = get_vlpi_map(d);
		if (map)
			vpe = map->vpe;
	}

	if (vpe) {
		cpu = vpe_to_cpuid_lock(vpe, flags);
	} else {
		/* Physical LPIs are already locked via the irq_desc lock */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
		/* Keep GCC quiet... */
		*flags = 0;
	}

	return cpu;
}
static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
	struct its_vpe *vpe = NULL;

	if (d->chip == &its_vpe_irq_chip) {
		vpe = irq_data_get_irq_chip_data(d);
	} else {
		struct its_vlpi_map *map = get_vlpi_map(d);
		if (map)
			vpe = map->vpe;
	}

	if (vpe)
		vpe_to_cpuid_unlock(vpe, flags);
}
static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}
/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;

		struct {
			struct its_vpe *vpe;
			u8 sgi;
			u8 priority;
			bool enable;
			bool group;
			bool clear;
		} its_vsgi_cmd;
	};
};
/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);
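/*
 * Each its_encode_*() helper below packs one field into the 4x64bit
 * command block via its_mask_encode(). For example, its_encode_devid()
 * writes the device ID into bits [63:32] of DW0: the GENMASK_ULL(63, 32)
 * mask is cleared first, then (devid << 32) is OR-ed in.
 */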
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);

	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
{
	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
}

static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
{
	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
}

static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
{
	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
}

static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
{
	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
}

static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
{
	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
}
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}
static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}
static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}
static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}
static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}
static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}
static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}
static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}
static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return desc->its_invall_cmd.col;
}
static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}
static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
	its_encode_ptz(cmd, alloc);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}
static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}
static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	if (is_v4_1(its)) {
		its_encode_db(cmd, true);
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	}

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}
static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}
static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}
static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	if (WARN_ON(!is_v4_1(its)))
		return NULL;

	its_encode_cmd(cmd, GITS_CMD_INVDB);
	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_invdb_cmd.vpe);
}
static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	if (WARN_ON(!is_v4_1(its)))
		return NULL;

	its_encode_cmd(cmd, GITS_CMD_VSGI);
	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
}
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}
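/*
 * Queue geometry: with a 64KB command queue and 32-byte commands,
 * ITS_CMD_QUEUE_NR_ENTRIES is 2048. One slot is deliberately left
 * unused so that a full queue (write + 1 == read) can be told apart
 * from an empty one (write == read).
 */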
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}
static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}
static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}
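/*
 * Worked example for the wrap-around handling below: GITS_CREADR
 * wraps at ITS_CMD_QUEUE_SZ (64KB). If prev_idx == 0xff80 and the
 * next read returns rd_idx == 0x0040, the progress made is
 * 0x0040 - 0xff80 + 0x10000 = 0xc0 bytes, which is what gets added
 * to linear_idx.
 */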
static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}
/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}
static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)
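/*
 * its_send_single_command(its, its_build_mapd_cmd, &desc) thus queues
 * the MAPD, then (if the builder returned a collection) a SYNC
 * targeting it, and busy-waits until GITS_CREADR has consumed both.
 */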
static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)
static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}
static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}
static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}
static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}
static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}
static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}
static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}
static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}
static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}
static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}
static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}
static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}
static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!require_its_list_vmovp(vpe->its_vm, its))
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;

	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
static void its_send_vinv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINV command. This is just a normal INV,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}
static void its_send_vint(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINT command. This is just a normal INT,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}
static void its_send_vclear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VCLEAR command. This is just a normal CLEAR,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}
static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_invdb_cmd.vpe = vpe;

	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
}
/*
 * irqchip functions - assumes MSI, mostly.
 */
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}
static void wait_for_syncr(void __iomem *rdbase)
{
	while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}
static void __direct_lpi_inv(struct irq_data *d, u64 val)
{
	void __iomem *rdbase;
	unsigned long flags;
	int cpu;

	/* Target the redistributor this LPI is currently routed to */
	cpu = irq_to_cpuid_lock(d, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);

	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);

	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	irq_to_cpuid_unlock(d, flags);
}
static void direct_lpi_inv(struct irq_data *d)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	u64 val;

	if (map) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		WARN_ON(!is_v4_1(its_dev->its));

		val  = GICR_INVLPIR_V;
		val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
		val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
	} else {
		val = d->hwirq;
	}

	__direct_lpi_inv(d, val);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi &&
	    (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
		direct_lpi_inv(d);
	else if (!irqd_is_forwarded_to_vcpu(d))
		its_send_inv(its_dev, its_get_event_id(d));
	else
		its_send_vinv(its_dev, its_get_event_id(d));
}
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	struct its_vlpi_map *map;

	/*
	 * GICv4.1 does away with the per-LPI nonsense, nothing to do
	 * here.
	 */
	if (is_v4_1(its_dev->its))
		return;

	map = dev_event_to_vlpi_map(its_dev, event);

	if (map->db_enabled == enable)
		return;

	map->db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}
static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}
static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);

	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
}
static void its_inc_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
	else
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
}
static void its_dec_lpi_count(struct irq_data *d, int cpu)
{
	if (irqd_affinity_is_managed(d))
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
	else
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
}
static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
					      const struct cpumask *cpu_mask)
{
	unsigned int cpu = nr_cpu_ids, tmp;
	int count = S32_MAX;

	for_each_cpu(tmp, cpu_mask) {
		int this_count = its_read_lpi_count(d, tmp);
		if (this_count < count) {
			cpu = tmp;
			count = this_count;
		}
	}

	return cpu;
}
/*
 * As suggested by Thomas Gleixner in:
 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
 */
static int its_select_cpu(struct irq_data *d,
			  const struct cpumask *aff_mask)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	cpumask_var_t tmpmask;
	int cpu, node;

	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
		return -ENOMEM;

	node = its_dev->its->numa_node;

	if (!irqd_affinity_is_managed(d)) {
		/* First try the NUMA node */
		if (node != NUMA_NO_NODE) {
			/*
			 * Try the intersection of the affinity mask and the
			 * node mask (and the online mask, just to be safe).
			 */
			cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
			cpumask_and(tmpmask, tmpmask, cpu_online_mask);

			/*
			 * Ideally, we would check if the mask is empty, and
			 * try again on the full node here.
			 *
			 * But it turns out that the way ACPI describes the
			 * affinity for ITSs only deals with memory, and
			 * not target CPUs, so it cannot describe a single
			 * ITS placed next to two NUMA nodes.
			 *
			 * Instead, just fallback on the online mask. This
			 * diverges from Thomas' suggestion above.
			 */
			cpu = cpumask_pick_least_loaded(d, tmpmask);
			if (cpu < nr_cpu_ids)
				goto out;

			/* If we can't cross sockets, give up */
			if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
				goto out;

			/* If the above failed, expand the search */
		}

		/* Try the intersection of the affinity and online masks */
		cpumask_and(tmpmask, aff_mask, cpu_online_mask);

		/* If that doesn't fly, the online mask is the last resort */
		if (cpumask_empty(tmpmask))
			cpumask_copy(tmpmask, cpu_online_mask);

		cpu = cpumask_pick_least_loaded(d, tmpmask);
	} else {
		cpumask_copy(tmpmask, aff_mask);

		/* If we cannot cross sockets, limit the search to that node */
		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
		    node != NUMA_NO_NODE)
			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));

		cpu = cpumask_pick_least_loaded(d, tmpmask);
	}
out:
	free_cpumask_var(tmpmask);

	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
	return cpu;
}
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);
	int cpu, prev_cpu;

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	prev_cpu = its_dev->event_map.col_map[id];
	its_dec_lpi_count(d, prev_cpu);

	if (!force)
		cpu = its_select_cpu(d, mask_val);
	else
		cpu = cpumask_pick_least_loaded(d, mask_val);

	if (cpu < 0 || cpu >= nr_cpu_ids)
		goto err;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != prev_cpu) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	its_inc_lpi_count(d, cpu);

	return IRQ_SET_MASK_OK_DONE;

err:
	its_inc_lpi_count(d, prev_cpu);
	return -EINVAL;
}
static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}
static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (irqd_is_forwarded_to_vcpu(d)) {
		if (state)
			its_send_vint(its_dev, event);
		else
			its_send_vclear(its_dev, event);
	} else {
		if (state)
			its_send_int(its_dev, event);
		else
			its_send_clear(its_dev, event);
	}

	return 0;
}
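/* Retriggering boils down to making the LPI pending again via INT */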
static int its_irq_retrigger(struct irq_data *d)
{
	return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
}
/*
 * Two favourable cases:
 *
 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
 *
 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
 *     and we're better off mapping all VPEs always
 *
 * If neither (a) nor (b) is true, then we map vPEs on demand.
 */
static bool gic_requires_eager_mapping(void)
{
	if (!its_list_map || gic_rdists->has_rvpeid)
		return true;

	return false;
}
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	if (gic_requires_eager_mapping())
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (gic_requires_eager_mapping())
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	raw_spin_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_ATOMIC);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_vlpi_map *map;
	int ret = 0;

	raw_spin_lock(&its_dev->event_map.vlpi_lock);

	map = get_vlpi_map(d);

	if (!its_dev->event_map.vm || !map) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = *map;

out:
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}
static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	raw_spin_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}
static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!is_v4(its_dev->its))
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}
static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_retrigger		= its_irq_retrigger,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};
/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */
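/*
 * Worked example: the allocator starts with one free range covering
 * [8192, 8192 + N). Allocating 32 LPIs returns base 8192 and shrinks
 * the range to [8224, 8192 + N); freeing [8192, 8224) later inserts a
 * new range that merge_lpi_ranges() folds straight back into its
 * neighbour.
 */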
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);

struct lpi_range {
	struct list_head	entry;
	u32			base_id;
	u32			span;
};
static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
	struct lpi_range *range;

	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (range) {
		range->base_id = base;
		range->span = span;
	}

	return range;
}
static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
	struct lpi_range *range, *tmp;
	int err = -ENOSPC;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (range->span >= nr_lpis) {
			*base = range->base_id;
			range->base_id += nr_lpis;
			range->span -= nr_lpis;

			if (range->span == 0) {
				list_del(&range->entry);
				kfree(range);
			}

			err = 0;
			break;
		}
	}

	mutex_unlock(&lpi_range_lock);

	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
	return err;
}
static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
{
	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
		return;

	if (a->base_id + a->span != b->base_id)
		return;

	b->base_id = a->base_id;
	b->span += a->span;
	list_del(&a->entry);
	kfree(a);
}
static int free_lpi_range(u32 base, u32 nr_lpis)
{
	struct lpi_range *new, *old;

	new = mk_lpi_range(base, nr_lpis);
	if (!new)
		return -ENOMEM;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
		if (old->base_id < base)
			break;
	}
	/*
	 * old is the last element with ->base_id smaller than base,
	 * so new goes right after it. If there are no elements with
	 * ->base_id smaller than base, &old->entry ends up pointing
	 * at the head of the list, and inserting new at the start of
	 * the list is the right thing to do in that case as well.
	 */
	list_add(&new->entry, &old->entry);
	/*
	 * Now check if we can merge with the preceding and/or
	 * following ranges.
	 */
	merge_lpi_ranges(old, new);
	merge_lpi_ranges(new, list_next_entry(new, entry));

	mutex_unlock(&lpi_range_lock);
	return 0;
}
static int __init its_lpi_init(u32 id_bits)
{
	u32 lpis = (1UL << id_bits) - 8192;
	u32 numlpis;
	int err;

	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);

	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
		lpis = numlpis;
		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
			lpis);
	}

	/*
	 * Initializing the allocator is just the same as freeing the
	 * full range of LPIs.
	 */
	err = free_lpi_range(8192, lpis);
	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
	return err;
}
static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int err = 0;

	do {
		err = alloc_lpi_range(nr_irqs, base);
		if (!err)
			break;

		nr_irqs /= 2;
	} while (nr_irqs > 0);

	if (!nr_irqs)
		err = -ENOSPC;

	if (err)
		goto out;

	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof(long), GFP_ATOMIC);
	if (!bitmap)
		goto out;

	*nr_ids = nr_irqs;

out:
	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}
static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
	WARN_ON(free_lpi_range(base, nr_ids));
	kfree(bitmap);
}
static void gic_reset_prop_table(void *va)
{
	/* Priority 0xa0, Group-1, disabled */
	memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
}
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;

	gic_reset_prop_table(page_address(prop_page));

	return prop_page;
}
static void its_free_prop_table(struct page *prop_page)
{
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
{
	phys_addr_t start, end, addr_end;
	u64 i;

	/*
	 * We don't bother checking for a kdump kernel as by
	 * construction, the LPI tables are out of this kernel's
	 * memory map.
	 */
	if (is_kdump_kernel())
		return true;

	addr_end = addr + size - 1;

	for_each_reserved_mem_range(i, &start, &end) {
		if (addr >= start && addr_end <= end)
			return true;
	}

	/* Not found, not a good sign... */
	pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
		&addr, &addr_end);
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
	return false;
}
static int gic_reserve_range(phys_addr_t addr, unsigned long size)
{
	if (efi_enabled(EFI_CONFIG_TABLES))
		return efi_mem_reserve_persistent(addr, size);

	return 0;
}
static int __init its_setup_lpi_prop_table(void)
{
	if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
		u64 val;

		val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
		lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;

		gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
						     LPI_PROPBASE_SZ,
						     MEMREMAP_WB);
		gic_reset_prop_table(gic_rdists->prop_table_va);
	} else {
		struct page *page;

		lpi_id_bits = min_t(u32,
				    GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
				    ITS_MAX_LPI_NRBITS);
		page = its_allocate_prop_table(GFP_NOWAIT);
		if (!page) {
			pr_err("Failed to allocate PROPBASE\n");
			return -ENOMEM;
		}

		gic_rdists->prop_table_pa = page_to_phys(page);
		gic_rdists->prop_table_va = page_address(page);
		WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
					  LPI_PROPBASE_SZ));
	}

	pr_info("GICv3: using LPI property table @%pa\n",
		&gic_rdists->prop_table_pa);

	return its_lpi_init(lpi_id_bits);
}
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};
static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 order, bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u64 baser_phys, tmp;
	u32 alloc_pages, psz;
	struct page *page;
	void *base;

	psz = baser->psz;
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	base = (void *)page_address(page);
	baser_phys = virt_to_phys(base);

	/* Check if the physical address of the memory is above 48bits */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {

		/* 52bit PA is supported only when PageSize=64K */
		if (psz != SZ_64K) {
			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
			free_pages((unsigned long)base, order);
			return -ENXIO;
		}

		/* Convert 52bit PA to 48bit field */
		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
	}

retry_baser:
	val = (baser_phys					 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}
static bool its_parse_indirect_baser(struct its_node *its,
				     struct its_baser *baser,
				     u32 *order, u32 ids)
{
	u64 tmp = its_read_baser(its, baser);
	u64 type = GITS_BASER_TYPE(tmp);
	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 new_order = *order;
	u32 psz = baser->psz;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level
		 * table by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to ITS page size
			 * which is 'psz'. For computing lvl1 table size,
			 * subtract ID bits that sparse lvl2 table from 'ids'
			 * which is reported by ITS hardware times lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
			&its->phys_base, its_base_type_string[type],
			device_ids(its), ids);
	}

	*order = new_order;

	return indirect;
}
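/*
 * Worked sizing example for the two-level case above: with 64KB ITS
 * pages, 8-byte entries and 32 DevID bits, a flat table would need
 * 8 << 32 = 32GB. Indirection makes each 64KB lvl2 page cover
 * 64K / 8 = 2^13 IDs, so the lvl1 table only needs
 * 2^(32 - 13) * GITS_LVL1_ENTRY_SIZE = 4MB.
 */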
2470 static u32 compute_common_aff(u64 val)
2474 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2475 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2477 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
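/*
 * A sketch of the masking above, assuming the architected
 * CommonLPIAff encoding: clpiaff == 0 keeps no affinity byte
 * significant (all RDs share tables), 1 keeps Aff3, 2 keeps
 * Aff3.Aff2, and 3 keeps Aff3.Aff2.Aff1 for the comparison.
 */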
2480 static u32 compute_its_aff(struct its_node *its)
2486 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2487 * the resulting affinity. We then use that to see if this matches
2490 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2491 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2492 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2493 return compute_common_aff(val);
2496 static struct its_node *find_sibling_its(struct its_node *cur_its)
2498 struct its_node *its;
2501 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2504 aff = compute_its_aff(cur_its);
2506 list_for_each_entry(its, &its_nodes, entry) {
2509 if (!is_v4_1(its) || its == cur_its)
2512 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2515 if (aff != compute_its_aff(its))
2518 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2519 baser = its->tables[2].val;
2520 if (!(baser & GITS_BASER_VALID))
2529 static void its_free_tables(struct its_node *its)
2533 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2534 if (its->tables[i].base) {
2535 free_pages((unsigned long)its->tables[i].base,
2536 its->tables[i].order);
2537 its->tables[i].base = NULL;
2542 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2549 val = its_read_baser(its, baser);
2550 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2554 gpsz = GITS_BASER_PAGE_SIZE_64K;
2557 gpsz = GITS_BASER_PAGE_SIZE_16K;
2561 gpsz = GITS_BASER_PAGE_SIZE_4K;
2565 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2567 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2568 its_write_baser(its, baser, val);
2570 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2590 static int its_alloc_tables(struct its_node *its)
2592 u64 shr = GITS_BASER_InnerShareable;
2593 u64 cache = GITS_BASER_RaWaWb;
2596 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2597 /* erratum 24313: ignore memory access type */
2598 cache = GITS_BASER_nCnB;
2600 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2601 struct its_baser *baser = its->tables + i;
2602 u64 val = its_read_baser(its, baser);
2603 u64 type = GITS_BASER_TYPE(val);
2604 bool indirect = false;
2607 if (type == GITS_BASER_TYPE_NONE)
2610 if (its_probe_baser_psz(its, baser)) {
2611 its_free_tables(its);
2615 order = get_order(baser->psz);
2618 case GITS_BASER_TYPE_DEVICE:
2619 indirect = its_parse_indirect_baser(its, baser, &order,
2623 case GITS_BASER_TYPE_VCPU:
2625 struct its_node *sibling;
2628 if ((sibling = find_sibling_its(its))) {
2629 *baser = sibling->tables[2];
2630 its_write_baser(its, baser, baser->val);
2635 indirect = its_parse_indirect_baser(its, baser, &order,
2636 ITS_MAX_VPEID_BITS);
2640 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2642 its_free_tables(its);
2646 /* Update settings which will be used for next BASERn */
2647 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2648 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2654 static u64 inherit_vpe_l1_table_from_its(void)
2656 struct its_node *its;
2660 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2661 aff = compute_common_aff(val);
2663 list_for_each_entry(its, &its_nodes, entry) {
2669 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2672 if (aff != compute_its_aff(its))
2675 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2676 baser = its->tables[2].val;
2677 if (!(baser & GITS_BASER_VALID))
2680 /* We have a winner! */
2681 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2683 val = GICR_VPROPBASER_4_1_VALID;
2684 if (baser & GITS_BASER_INDIRECT)
2685 val |= GICR_VPROPBASER_4_1_INDIRECT;
2686 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2687 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2688 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2689 case GIC_PAGE_SIZE_64K:
2690 addr = GITS_BASER_ADDR_48_to_52(baser);
2693 addr = baser & GENMASK_ULL(47, 12);
2696 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2697 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2698 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2699 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2700 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2701 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2709 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2715 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2716 aff = compute_common_aff(val);
2718 for_each_possible_cpu(cpu) {
2719 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2721 if (!base || cpu == smp_processor_id())
2724 val = gic_read_typer(base + GICR_TYPER);
2725 if (aff != compute_common_aff(val))
2729 * At this point, we have a victim. This particular CPU
2730 * has already booted, and has an affinity that matches
2731 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2732 * Make sure we don't write the Z bit in that case.
2734 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2735 val &= ~GICR_VPROPBASER_4_1_Z;
2737 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2738 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2746 static bool allocate_vpe_l2_table(int cpu, u32 id)
2748 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2749 unsigned int psz, esz, idx, npg, gpsz;
2754 if (!gic_rdists->has_rvpeid)
2757 /* Skip non-present CPUs */
2761 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2763 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2764 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2765 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2771 case GIC_PAGE_SIZE_4K:
2774 case GIC_PAGE_SIZE_16K:
2777 case GIC_PAGE_SIZE_64K:
2782 /* Don't allow vpe_id that exceeds single, flat table limit */
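/*
 * Sizing note: the Entry_Size field is encoded in 8-byte
 * doublewords (hence the esz * SZ_8 below), and a flat table
 * spans npg pages of psz bytes, bounding the highest flat vpe_id.
 */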
2783 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2784 return (id < (npg * psz / (esz * SZ_8)));
2786 /* Compute 1st level table index & check if that exceeds table limit */
2787 idx = id >> ilog2(psz / (esz * SZ_8));
2788 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2791 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2793 /* Allocate memory for 2nd level table */
2795 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2799 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2800 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2801 gic_flush_dcache_to_poc(page_address(page), psz);
2803 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2805 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2806 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2807 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2809 /* Ensure updated table contents are visible to RD hardware */
2816 static int allocate_vpe_l1_table(void)
2818 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2819 u64 val, gpsz, npg, pa;
2820 unsigned int psz = SZ_64K;
2821 unsigned int np, epp, esz;
2824 if (!gic_rdists->has_rvpeid)
2828 * If GICR_VPENDBASER.Valid is set, disable any previously programmed
2829 * VPE by setting PendingLast while clearing Valid. This has the
2830 * effect of making sure no doorbell will be generated and we can
2831 * then safely clear VPROPBASER.Valid.
2833 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2834 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2835 vlpi_base + GICR_VPENDBASER);
2838 * If we can inherit the configuration from another RD, let's do
2839 * so. Otherwise, we have to go through the allocation process. We
2840 * assume that all RDs have the exact same requirements, as
2841 * nothing will work otherwise.
2843 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2844 if (val & GICR_VPROPBASER_4_1_VALID)
2847 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2848 if (!gic_data_rdist()->vpe_table_mask)
2851 val = inherit_vpe_l1_table_from_its();
2852 if (val & GICR_VPROPBASER_4_1_VALID)
2855 /* First probe the page size */
2856 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2857 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2858 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2859 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2860 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2864 gpsz = GIC_PAGE_SIZE_4K;
2866 case GIC_PAGE_SIZE_4K:
2869 case GIC_PAGE_SIZE_16K:
2872 case GIC_PAGE_SIZE_64K:
2878 * Start populating the register from scratch, including RO fields
2879 * (which we want to print in debug cases...)
2882 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2883 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2885 /* How many entries per GIC page? */
2887 epp = psz / (esz * SZ_8);
2890 * If we need more than just a single L1 page, flag the table
2891 * as indirect and compute the number of required L1 pages.
2893 if (epp < ITS_MAX_VPEID) {
2896 val |= GICR_VPROPBASER_4_1_INDIRECT;
2898 /* Number of L2 pages required to cover the VPEID space */
2899 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2901 /* Number of L1 pages to point to the L2 pages */
2902 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
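/*
 * Worked example (illustrative numbers): with psz = 64K, esz = 2
 * doublewords (16 bytes) and a 64K VPEID space, epp is
 * 65536 / 16 = 4096, so nl2 = 16 L2 pages, whose 16 * 8 bytes of
 * L1 entries fit in a single L1 page (npg = 1).
 */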
2907 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2909 /* Right, that's the number of CPU pages we need for L1 */
2910 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2912 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2913 np, npg, psz, epp, esz);
2914 page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2918 gic_data_rdist()->vpe_l1_base = page_address(page);
2919 pa = virt_to_phys(page_address(page));
2920 WARN_ON(!IS_ALIGNED(pa, psz));
2922 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2923 val |= GICR_VPROPBASER_RaWb;
2924 val |= GICR_VPROPBASER_InnerShareable;
2925 val |= GICR_VPROPBASER_4_1_Z;
2926 val |= GICR_VPROPBASER_4_1_VALID;
2929 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2930 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2932 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2933 smp_processor_id(), val,
2934 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2939 static int its_alloc_collections(struct its_node *its)
2943 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2945 if (!its->collections)
2948 for (i = 0; i < nr_cpu_ids; i++)
2949 its->collections[i].target_address = ~0ULL;
2954 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2956 struct page *pend_page;
2958 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2959 get_order(LPI_PENDBASE_SZ));
2963 /* Make sure the GIC will observe the zero-ed page */
2964 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2969 static void its_free_pending_table(struct page *pt)
2971 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2975 * Booting with kdump and LPIs enabled is generally fine. Any other
2976 * case is wrong in the absence of firmware/EFI support.
2978 static bool enabled_lpis_allowed(void)
2983 /* Check whether the property table is in a reserved region */
2984 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2985 addr = val & GENMASK_ULL(51, 12);
2987 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2990 static int __init allocate_lpi_tables(void)
2996 * If LPIs are enabled while we run this from the boot CPU,
2997 * flag the RD tables as pre-allocated if the stars do align.
2999 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
3000 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
3001 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
3002 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
3003 pr_info("GICv3: Using preallocated redistributor tables\n");
3006 err = its_setup_lpi_prop_table();
3011 * We allocate all the pending tables anyway, as we may have a
3012 * mix of RDs that have had LPIs enabled, and some that
3013 * don't. We'll free the unused ones as each CPU comes online.
3015 for_each_possible_cpu(cpu) {
3016 struct page *pend_page;
3018 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3020 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3024 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3030 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3032 u32 count = 1000000; /* 1s! */
3036 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3037 val &= ~GICR_VPENDBASER_Valid;
3040 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3043 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3044 clean = !(val & GICR_VPENDBASER_Dirty);
3050 } while (!clean && count);
3052 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
3053 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
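/* Report PendingLast so callers err on the side of "still pending" */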
3054 val |= GICR_VPENDBASER_PendingLast;
3060 static void its_cpu_init_lpis(void)
3062 void __iomem *rbase = gic_data_rdist_rd_base();
3063 struct page *pend_page;
3067 if (gic_data_rdist()->lpi_enabled)
3070 val = readl_relaxed(rbase + GICR_CTLR);
3071 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3072 (val & GICR_CTLR_ENABLE_LPIS)) {
3074 * Check that we get the same property table on all
3075 * RDs. If we don't, this is hopeless.
3077 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3078 paddr &= GENMASK_ULL(51, 12);
3079 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3080 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3082 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
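/* Pending tables are 64K aligned; the PA lives in bits [51:16] */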
3083 paddr &= GENMASK_ULL(51, 16);
3085 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3086 its_free_pending_table(gic_data_rdist()->pend_page);
3087 gic_data_rdist()->pend_page = NULL;
3092 pend_page = gic_data_rdist()->pend_page;
3093 paddr = page_to_phys(pend_page);
3094 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
3097 val = (gic_rdists->prop_table_pa |
3098 GICR_PROPBASER_InnerShareable |
3099 GICR_PROPBASER_RaWaWb |
3100 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3102 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3103 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3105 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3106 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3108 * The HW reports non-shareable, we must
3109 * remove the cacheability attributes as
3112 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3113 GICR_PROPBASER_CACHEABILITY_MASK);
3114 val |= GICR_PROPBASER_nC;
3115 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3117 pr_info_once("GIC: using cache flushing for LPI property table\n");
3118 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3122 val = (page_to_phys(pend_page) |
3123 GICR_PENDBASER_InnerShareable |
3124 GICR_PENDBASER_RaWaWb);
3126 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3127 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3129 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3131 * The HW reports non-shareable, we must remove the
3132 * cacheability attributes as well.
3134 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3135 GICR_PENDBASER_CACHEABILITY_MASK);
3136 val |= GICR_PENDBASER_nC;
3137 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3141 val = readl_relaxed(rbase + GICR_CTLR);
3142 val |= GICR_CTLR_ENABLE_LPIS;
3143 writel_relaxed(val, rbase + GICR_CTLR);
3145 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3146 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3149 * It's possible for a CPU to receive VLPIs before it is
3150 * scheduled as a vPE, especially for the first CPU, and
3151 * VLPIs with an INTID larger than 2^(IDbits+1) will be
3152 * considered out of range and dropped by the GIC.
3153 * So initialize IDbits to a known value to avoid VLPI drops.
3155 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3156 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3157 smp_processor_id(), val);
3158 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3161 * Also clear the Valid bit of GICR_VPENDBASER, in case some
3162 * ancient programming was left in place and could end up
3163 * corrupting memory.
3165 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3168 if (allocate_vpe_l1_table()) {
3170 * If the allocation has failed, we're in massive trouble.
3171 * Disable direct injection, and pray that no VM was
3172 * already running...
3174 gic_rdists->has_rvpeid = false;
3175 gic_rdists->has_vlpis = false;
3178 /* Make sure the GIC has seen the above */
3181 gic_data_rdist()->lpi_enabled = true;
3182 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3184 gic_data_rdist()->pend_page ? "allocated" : "reserved",
3188 static void its_cpu_init_collection(struct its_node *its)
3190 int cpu = smp_processor_id();
3193 /* Avoid cross-node collections and their mapping */
3194 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3195 struct device_node *cpu_node;
3197 cpu_node = of_get_cpu_node(cpu, NULL);
3198 if (its->numa_node != NUMA_NO_NODE &&
3199 its->numa_node != of_node_to_nid(cpu_node))
3204 * We now have to bind each collection to its target
3207 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3209 * This ITS wants the physical address of the
3212 target = gic_data_rdist()->phys_base;
3214 /* This ITS wants a linear CPU number. */
3215 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3216 target = GICR_TYPER_CPU_NUMBER(target) << 16;
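/*
 * Note: with GITS_TYPER.PTA == 0, the MAPC RDbase field carries a
 * linear processor number rather than a physical address; the shift
 * above pre-aligns it with the RDbase position in the command.
 */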
3219 /* Perform collection mapping */
3220 its->collections[cpu].target_address = target;
3221 its->collections[cpu].col_id = cpu;
3223 its_send_mapc(its, &its->collections[cpu], 1);
3224 its_send_invall(its, &its->collections[cpu]);
3227 static void its_cpu_init_collections(void)
3229 struct its_node *its;
3231 raw_spin_lock(&its_lock);
3233 list_for_each_entry(its, &its_nodes, entry)
3234 its_cpu_init_collection(its);
3236 raw_spin_unlock(&its_lock);
3239 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3241 struct its_device *its_dev = NULL, *tmp;
3242 unsigned long flags;
3244 raw_spin_lock_irqsave(&its->lock, flags);
3246 list_for_each_entry(tmp, &its->its_device_list, entry) {
3247 if (tmp->device_id == dev_id) {
3253 raw_spin_unlock_irqrestore(&its->lock, flags);
3258 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3262 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3263 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3264 return &its->tables[i];
3270 static bool its_alloc_table_entry(struct its_node *its,
3271 struct its_baser *baser, u32 id)
3277 /* Don't allow device id that exceeds single, flat table limit */
3278 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3279 if (!(baser->val & GITS_BASER_INDIRECT))
3280 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3282 /* Compute 1st level table index & check if that exceeds table limit */
3283 idx = id >> ilog2(baser->psz / esz);
3284 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3287 table = baser->base;
3289 /* Allocate memory for 2nd level table */
3291 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3292 get_order(baser->psz));
3296 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3297 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3298 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3300 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3302 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3303 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3304 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3306 /* Ensure updated table contents are visible to ITS hardware */
3313 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3315 struct its_baser *baser;
3317 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3319 /* Don't allow device id that exceeds ITS hardware limit */
3321 return (ilog2(dev_id) < device_ids(its));
3323 return its_alloc_table_entry(its, baser, dev_id);
3326 static bool its_alloc_vpe_table(u32 vpe_id)
3328 struct its_node *its;
3332 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3333 * could try and only do it on ITSs corresponding to devices
3334 * that have interrupts targeted at this VPE, but the
3335 * complexity becomes crazy (and you have tons of memory
3338 list_for_each_entry(its, &its_nodes, entry) {
3339 struct its_baser *baser;
3344 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3348 if (!its_alloc_table_entry(its, baser, vpe_id))
3352 /* Not v4.1? No need to iterate the RDs; bail out early. */
3353 if (!gic_rdists->has_rvpeid)
3357 * Make sure the L2 tables are allocated for all copies of
3358 * the L1 table on *all* v4.1 RDs.
3360 for_each_possible_cpu(cpu) {
3361 if (!allocate_vpe_l2_table(cpu, vpe_id))
3368 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3369 int nvecs, bool alloc_lpis)
3371 struct its_device *dev;
3372 unsigned long *lpi_map = NULL;
3373 unsigned long flags;
3374 u16 *col_map = NULL;
3381 if (!its_alloc_device_table(its, dev_id))
3384 if (WARN_ON(!is_power_of_2(nvecs)))
3385 nvecs = roundup_pow_of_two(nvecs);
3387 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3389 * Even if the device wants a single LPI, the ITT must be
3390 * sized as a power of two (and you need at least one bit...).
3392 nr_ites = max(2, nvecs);
3393 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3394 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
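/*
 * kzalloc() doesn't guarantee ITS_ITT_ALIGN (256-byte) alignment,
 * so over-allocate by ITS_ITT_ALIGN - 1 bytes, ensuring an aligned
 * ITT of the full size always fits within the allocation.
 */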
3395 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3397 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3399 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3402 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3407 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3415 gic_flush_dcache_to_poc(itt, sz);
3419 dev->nr_ites = nr_ites;
3420 dev->event_map.lpi_map = lpi_map;
3421 dev->event_map.col_map = col_map;
3422 dev->event_map.lpi_base = lpi_base;
3423 dev->event_map.nr_lpis = nr_lpis;
3424 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3425 dev->device_id = dev_id;
3426 INIT_LIST_HEAD(&dev->entry);
3428 raw_spin_lock_irqsave(&its->lock, flags);
3429 list_add(&dev->entry, &its->its_device_list);
3430 raw_spin_unlock_irqrestore(&its->lock, flags);
3432 /* Map device to its ITT */
3433 its_send_mapd(dev, 1);
3438 static void its_free_device(struct its_device *its_dev)
3440 unsigned long flags;
3442 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3443 list_del(&its_dev->entry);
3444 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3445 kfree(its_dev->event_map.col_map);
3446 kfree(its_dev->itt);
3450 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3454 /* Find a free LPI region in lpi_map and allocate them. */
3455 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3456 dev->event_map.nr_lpis,
3457 get_count_order(nvecs));
3461 *hwirq = dev->event_map.lpi_base + idx;
3466 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3467 int nvec, msi_alloc_info_t *info)
3469 struct its_node *its;
3470 struct its_device *its_dev;
3471 struct msi_domain_info *msi_info;
3476 * We ignore "dev" entirely, and rely on the dev_id that has
3477 * been passed via the scratchpad. This limits this domain's
3478 * usefulness to upper layers that definitely know that they
3479 * are built on top of the ITS.
3481 dev_id = info->scratchpad[0].ul;
3483 msi_info = msi_get_domain_info(domain);
3484 its = msi_info->data;
3486 if (!gic_rdists->has_direct_lpi &&
3488 vpe_proxy.dev->its == its &&
3489 dev_id == vpe_proxy.dev->device_id) {
3490 /* Bad luck. Get yourself a better implementation */
3491 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3496 mutex_lock(&its->dev_alloc_lock);
3497 its_dev = its_find_device(its, dev_id);
3500 * We have already seen this ID, probably through
3501 * another alias (PCI bridge of some sort). No need to
3502 * create the device.
3504 its_dev->shared = true;
3505 pr_debug("Reusing ITT for devID %x\n", dev_id);
3509 its_dev = its_create_device(its, dev_id, nvec, true);
3515 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3517 mutex_unlock(&its->dev_alloc_lock);
3518 info->scratchpad[0].ptr = its_dev;
3522 static struct msi_domain_ops its_msi_domain_ops = {
3523 .msi_prepare = its_msi_prepare,
3526 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3528 irq_hw_number_t hwirq)
3530 struct irq_fwspec fwspec;
3532 if (irq_domain_get_of_node(domain->parent)) {
3533 fwspec.fwnode = domain->parent->fwnode;
3534 fwspec.param_count = 3;
3535 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3536 fwspec.param[1] = hwirq;
3537 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3538 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3539 fwspec.fwnode = domain->parent->fwnode;
3540 fwspec.param_count = 2;
3541 fwspec.param[0] = hwirq;
3542 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3547 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
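/*
 * Note on the two fwspec shapes above: a DT parent takes the 3-cell
 * GIC binding (GIC_IRQ_TYPE_LPI, hwirq, trigger flags), while an
 * ACPI/fwnode parent takes a 2-cell (hwirq, trigger flags) spec.
 */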
3550 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3551 unsigned int nr_irqs, void *args)
3553 msi_alloc_info_t *info = args;
3554 struct its_device *its_dev = info->scratchpad[0].ptr;
3555 struct its_node *its = its_dev->its;
3556 struct irq_data *irqd;
3557 irq_hw_number_t hwirq;
3561 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3565 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3569 for (i = 0; i < nr_irqs; i++) {
3570 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3574 irq_domain_set_hwirq_and_chip(domain, virq + i,
3575 hwirq + i, &its_irq_chip, its_dev);
3576 irqd = irq_get_irq_data(virq + i);
3577 irqd_set_single_target(irqd);
3578 irqd_set_affinity_on_activate(irqd);
3579 pr_debug("ID:%d pID:%d vID:%d\n",
3580 (int)(hwirq + i - its_dev->event_map.lpi_base),
3581 (int)(hwirq + i), virq + i);
3587 static int its_irq_domain_activate(struct irq_domain *domain,
3588 struct irq_data *d, bool reserve)
3590 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3591 u32 event = its_get_event_id(d);
3594 cpu = its_select_cpu(d, cpu_online_mask);
3595 if (cpu < 0 || cpu >= nr_cpu_ids)
3598 its_inc_lpi_count(d, cpu);
3599 its_dev->event_map.col_map[event] = cpu;
3600 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3602 /* Map the GIC IRQ and event to the device */
3603 its_send_mapti(its_dev, d->hwirq, event);
3607 static void its_irq_domain_deactivate(struct irq_domain *domain,
3610 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3611 u32 event = its_get_event_id(d);
3613 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3614 /* Stop the delivery of interrupts */
3615 its_send_discard(its_dev, event);
3618 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3619 unsigned int nr_irqs)
3621 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3622 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3623 struct its_node *its = its_dev->its;
3626 bitmap_release_region(its_dev->event_map.lpi_map,
3627 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3628 get_count_order(nr_irqs));
3630 for (i = 0; i < nr_irqs; i++) {
3631 struct irq_data *data = irq_domain_get_irq_data(domain,
3633 /* Nuke the entry in the domain */
3634 irq_domain_reset_irq_data(data);
3637 mutex_lock(&its->dev_alloc_lock);
3640 * If all interrupts have been freed, start mopping the
3641 * floor. This is conditioned on the device not being shared.
3643 if (!its_dev->shared &&
3644 bitmap_empty(its_dev->event_map.lpi_map,
3645 its_dev->event_map.nr_lpis)) {
3646 its_lpi_free(its_dev->event_map.lpi_map,
3647 its_dev->event_map.lpi_base,
3648 its_dev->event_map.nr_lpis);
3650 /* Unmap device/itt */
3651 its_send_mapd(its_dev, 0);
3652 its_free_device(its_dev);
3655 mutex_unlock(&its->dev_alloc_lock);
3657 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3660 static const struct irq_domain_ops its_domain_ops = {
3661 .alloc = its_irq_domain_alloc,
3662 .free = its_irq_domain_free,
3663 .activate = its_irq_domain_activate,
3664 .deactivate = its_irq_domain_deactivate,
3670 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3671 * likely), the only way to perform an invalidate is to use a fake
3672 * device to issue an INV command, implying that the LPI has first
3673 * been mapped to some event on that device. Since this is not exactly
3674 * cheap, we try to keep that mapping around as long as possible, and
3675 * only issue an UNMAP if we're short on available slots.
3677 * Broken by design(tm).
3679 * GICv4.1, on the other hand, mandates that we're able to invalidate
3680 * by writing to a MMIO register. It doesn't implement the whole of
3681 * DirectLPI, but that's good enough. And most of the time, we don't
3682 * even have to invalidate anything, as the redistributor can be told
4683 * whether to generate a doorbell or not (we thus leave it enabled, always).
3686 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3688 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3689 if (gic_rdists->has_rvpeid)
3692 /* Already unmapped? */
3693 if (vpe->vpe_proxy_event == -1)
3696 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3697 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3700 * We don't track empty slots at all, so let's move the
3701 * next_victim pointer if we can quickly reuse that slot
3702 * instead of nuking an existing entry. Not clear that this is
3703 * always a win though, and this might just generate a ripple
3704 * effect... Let's just hope VPEs don't migrate too often.
3706 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3707 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3709 vpe->vpe_proxy_event = -1;
3712 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3714 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3715 if (gic_rdists->has_rvpeid)
3718 if (!gic_rdists->has_direct_lpi) {
3719 unsigned long flags;
3721 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3722 its_vpe_db_proxy_unmap_locked(vpe);
3723 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3727 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3729 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3730 if (gic_rdists->has_rvpeid)
3733 /* Already mapped? */
3734 if (vpe->vpe_proxy_event != -1)
3737 /* This slot was already allocated. Kick the other VPE out. */
3738 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3739 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3741 /* Map the new VPE instead */
3742 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3743 vpe->vpe_proxy_event = vpe_proxy.next_victim;
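/* Advance the victim slot round-robin across the proxy device's ITT */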
3744 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3746 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3747 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3750 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3752 unsigned long flags;
3753 struct its_collection *target_col;
3755 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3756 if (gic_rdists->has_rvpeid)
3759 if (gic_rdists->has_direct_lpi) {
3760 void __iomem *rdbase;
3762 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3763 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3764 wait_for_syncr(rdbase);
3769 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3771 its_vpe_db_proxy_map_locked(vpe);
3773 target_col = &vpe_proxy.dev->its->collections[to];
3774 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3775 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3777 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3780 static int its_vpe_set_affinity(struct irq_data *d,
3781 const struct cpumask *mask_val,
3784 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3785 struct cpumask common, *table_mask;
3786 unsigned long flags;
3790 * Changing affinity is mega expensive, so let's be as lazy as
3791 * we can and only do it if we really have to. Also, if mapped
3792 * into the proxy device, we need to move the doorbell
3793 * interrupt to its new location.
3795 * Another thing is that changing the affinity of a vPE affects
3796 * *other interrupts* such as all the vLPIs that are routed to
3797 * this vPE. This means that the irq_desc lock is not enough to
3798 * protect us, and that we must ensure nobody samples vpe->col_idx
3799 * during the update, hence the lock below which must also be
3800 * taken on any vLPI handling path that evaluates vpe->col_idx.
3802 from = vpe_to_cpuid_lock(vpe, &flags);
3803 table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
3806 * If we are offered another CPU in the same GICv4.1 ITS
3807 * affinity, pick this one. Otherwise, any CPU will do.
3809 if (table_mask && cpumask_and(&common, mask_val, table_mask))
3810 cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
3812 cpu = cpumask_first(mask_val);
3819 its_send_vmovp(vpe);
3820 its_vpe_db_proxy_move(vpe, from, cpu);
3823 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3824 vpe_to_cpuid_unlock(vpe, flags);
3826 return IRQ_SET_MASK_OK_DONE;
3829 static void its_wait_vpt_parse_complete(void)
3831 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3834 if (!gic_rdists->has_vpend_valid_dirty)
3837 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3839 !(val & GICR_VPENDBASER_Dirty),
3843 static void its_vpe_schedule(struct its_vpe *vpe)
3845 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3848 /* Schedule the VPE */
3849 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3850 GENMASK_ULL(51, 12);
3851 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3852 val |= GICR_VPROPBASER_RaWb;
3853 val |= GICR_VPROPBASER_InnerShareable;
3854 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3856 val = virt_to_phys(page_address(vpe->vpt_page)) &
3857 GENMASK_ULL(51, 16);
3858 val |= GICR_VPENDBASER_RaWaWb;
3859 val |= GICR_VPENDBASER_InnerShareable;
3861 * There is no good way of finding out if the pending table is
3862 * empty as we can race against the doorbell interrupt very
3863 * easily. So in the end, vpe->pending_last is only an
3864 * indication that the vcpu has something pending, not one
3865 * that the pending table is empty. A good implementation
3866 * would be able to read its coarse map pretty quickly anyway,
3867 * making this a tolerable issue.
3869 val |= GICR_VPENDBASER_PendingLast;
3870 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3871 val |= GICR_VPENDBASER_Valid;
3872 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3874 its_wait_vpt_parse_complete();
3877 static void its_vpe_deschedule(struct its_vpe *vpe)
3879 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3882 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3884 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3885 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3888 static void its_vpe_invall(struct its_vpe *vpe)
3890 struct its_node *its;
3892 list_for_each_entry(its, &its_nodes, entry) {
3896 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3900 * Sending a VINVALL to a single ITS is enough, as all
3901 * we need is to reach the redistributors.
3903 its_send_vinvall(its, vpe);
3908 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3910 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3911 struct its_cmd_info *info = vcpu_info;
3913 switch (info->cmd_type) {
3915 its_vpe_schedule(vpe);
3918 case DESCHEDULE_VPE:
3919 its_vpe_deschedule(vpe);
3923 its_vpe_invall(vpe);
3931 static void its_vpe_send_cmd(struct its_vpe *vpe,
3932 void (*cmd)(struct its_device *, u32))
3934 unsigned long flags;
3936 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3938 its_vpe_db_proxy_map_locked(vpe);
3939 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3941 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3944 static void its_vpe_send_inv(struct irq_data *d)
3946 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3948 if (gic_rdists->has_direct_lpi)
3949 __direct_lpi_inv(d, d->parent_data->hwirq);
3951 its_vpe_send_cmd(vpe, its_send_inv);
3954 static void its_vpe_mask_irq(struct irq_data *d)
3957 * We need to mask the LPI, which is described by the parent
3958 * irq_data. Instead of calling into the parent (which won't
3959 * exactly do the right thing), let's simply use the
3960 * parent_data pointer. Yes, I'm naughty.
3962 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3963 its_vpe_send_inv(d);
3966 static void its_vpe_unmask_irq(struct irq_data *d)
3968 /* Same hack as above... */
3969 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3970 its_vpe_send_inv(d);
3973 static int its_vpe_set_irqchip_state(struct irq_data *d,
3974 enum irqchip_irq_state which,
3977 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3979 if (which != IRQCHIP_STATE_PENDING)
3982 if (gic_rdists->has_direct_lpi) {
3983 void __iomem *rdbase;
3985 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3987 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3989 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3990 wait_for_syncr(rdbase);
3994 its_vpe_send_cmd(vpe, its_send_int);
3996 its_vpe_send_cmd(vpe, its_send_clear);
4002 static int its_vpe_retrigger(struct irq_data *d)
4004 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4007 static struct irq_chip its_vpe_irq_chip = {
4008 .name = "GICv4-vpe",
4009 .irq_mask = its_vpe_mask_irq,
4010 .irq_unmask = its_vpe_unmask_irq,
4011 .irq_eoi = irq_chip_eoi_parent,
4012 .irq_set_affinity = its_vpe_set_affinity,
4013 .irq_retrigger = its_vpe_retrigger,
4014 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
4015 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
4018 static struct its_node *find_4_1_its(void)
4020 static struct its_node *its = NULL;
4023 list_for_each_entry(its, &its_nodes, entry) {
4035 static void its_vpe_4_1_send_inv(struct irq_data *d)
4037 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4038 struct its_node *its;
4041 * GICv4.1 wants doorbells to be invalidated using the
4042 * INVDB command in order to be broadcast to all RDs. Send
4043 * it to the first valid ITS, and let the HW do its magic.
4045 its = find_4_1_its();
4047 its_send_invdb(its, vpe);
4050 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4052 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4053 its_vpe_4_1_send_inv(d);
4056 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4058 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4059 its_vpe_4_1_send_inv(d);
4062 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4063 struct its_cmd_info *info)
4065 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4068 /* Schedule the VPE */
4069 val |= GICR_VPENDBASER_Valid;
4070 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4071 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4072 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4074 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4076 its_wait_vpt_parse_complete();
4079 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4080 struct its_cmd_info *info)
4082 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4086 unsigned long flags;
4089 * vPE is going to block: make the vPE non-resident with
4090 * PendingLast clear and DB set. The GIC guarantees that if
4091 * we read-back PendingLast clear, then a doorbell will be
4092 * delivered when an interrupt comes.
4094 * Note the locking to deal with the concurrent update of
4095 * pending_last from the doorbell interrupt handler that can
4098 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4099 val = its_clear_vpend_valid(vlpi_base,
4100 GICR_VPENDBASER_PendingLast,
4101 GICR_VPENDBASER_4_1_DB);
4102 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4103 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4106 * We're not blocking, so just make the vPE non-resident
4107 * with PendingLast set, indicating that we'll be back.
4109 val = its_clear_vpend_valid(vlpi_base,
4111 GICR_VPENDBASER_PendingLast);
4112 vpe->pending_last = true;
4116 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4118 void __iomem *rdbase;
4119 unsigned long flags;
4123 val = GICR_INVALLR_V;
4124 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4126 /* Target the redistributor this vPE is currently known on */
4127 cpu = vpe_to_cpuid_lock(vpe, &flags);
4128 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4129 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4130 gic_write_lpir(val, rdbase + GICR_INVALLR);
4132 wait_for_syncr(rdbase);
4133 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4134 vpe_to_cpuid_unlock(vpe, flags);
4137 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4139 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4140 struct its_cmd_info *info = vcpu_info;
4142 switch (info->cmd_type) {
4144 its_vpe_4_1_schedule(vpe, info);
4147 case DESCHEDULE_VPE:
4148 its_vpe_4_1_deschedule(vpe, info);
4152 its_vpe_4_1_invall(vpe);
4160 static struct irq_chip its_vpe_4_1_irq_chip = {
4161 .name = "GICv4.1-vpe",
4162 .irq_mask = its_vpe_4_1_mask_irq,
4163 .irq_unmask = its_vpe_4_1_unmask_irq,
4164 .irq_eoi = irq_chip_eoi_parent,
4165 .irq_set_affinity = its_vpe_set_affinity,
4166 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4169 static void its_configure_sgi(struct irq_data *d, bool clear)
4171 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4172 struct its_cmd_desc desc;
4174 desc.its_vsgi_cmd.vpe = vpe;
4175 desc.its_vsgi_cmd.sgi = d->hwirq;
4176 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4177 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4178 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4179 desc.its_vsgi_cmd.clear = clear;
4182 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4183 * destination VPE is mapped there. Since we map them eagerly at
4184 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4186 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4189 static void its_sgi_mask_irq(struct irq_data *d)
4191 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4193 vpe->sgi_config[d->hwirq].enabled = false;
4194 its_configure_sgi(d, false);
4197 static void its_sgi_unmask_irq(struct irq_data *d)
4199 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4201 vpe->sgi_config[d->hwirq].enabled = true;
4202 its_configure_sgi(d, false);
4205 static int its_sgi_set_affinity(struct irq_data *d,
4206 const struct cpumask *mask_val,
4210 * There is no notion of affinity for virtual SGIs, at least
4211 * not on the host (since they can only be targeting a vPE).
4212 * Tell the kernel we've done whatever it asked for.
4214 irq_data_update_effective_affinity(d, mask_val);
4215 return IRQ_SET_MASK_OK;
4218 static int its_sgi_set_irqchip_state(struct irq_data *d,
4219 enum irqchip_irq_state which,
4222 if (which != IRQCHIP_STATE_PENDING)
4226 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4227 struct its_node *its = find_4_1_its();
4230 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4231 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
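/*
 * sgir_base maps the 64K frame at ITS base + 128K, while the
 * GITS_SGIR define is an offset from the ITS base itself, hence
 * the SZ_128K correction in the write below.
 */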
4232 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4234 its_configure_sgi(d, true);
4240 static int its_sgi_get_irqchip_state(struct irq_data *d,
4241 enum irqchip_irq_state which, bool *val)
4243 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4245 unsigned long flags;
4246 u32 count = 1000000; /* 1s! */
4250 if (which != IRQCHIP_STATE_PENDING)
4254 * Locking galore! We can race against two different events:
4256 * - Concurrent vPE affinity change: we must make sure it cannot
4257 * happen, or we'll talk to the wrong redistributor. This is
4258 * identical to what happens with vLPIs.
4260 * - Concurrent VSGIPENDR access: As it involves accessing two
4261 * MMIO registers, this must be made atomic one way or another.
4263 cpu = vpe_to_cpuid_lock(vpe, &flags);
4264 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4265 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4266 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4268 status = readl_relaxed(base + GICR_VSGIPENDR);
4269 if (!(status & GICR_VSGIPENDR_BUSY))
4274 pr_err_ratelimited("Unable to get SGI status\n");
4282 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4283 vpe_to_cpuid_unlock(vpe, flags);
4288 *val = !!(status & (1 << d->hwirq));
4293 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4295 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4296 struct its_cmd_info *info = vcpu_info;
4298 switch (info->cmd_type) {
4299 case PROP_UPDATE_VSGI:
4300 vpe->sgi_config[d->hwirq].priority = info->priority;
4301 vpe->sgi_config[d->hwirq].group = info->group;
4302 its_configure_sgi(d, false);
4310 static struct irq_chip its_sgi_irq_chip = {
4311 .name = "GICv4.1-sgi",
4312 .irq_mask = its_sgi_mask_irq,
4313 .irq_unmask = its_sgi_unmask_irq,
4314 .irq_set_affinity = its_sgi_set_affinity,
4315 .irq_set_irqchip_state = its_sgi_set_irqchip_state,
4316 .irq_get_irqchip_state = its_sgi_get_irqchip_state,
4317 .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
4320 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4321 unsigned int virq, unsigned int nr_irqs,
4324 struct its_vpe *vpe = args;
4327 /* Yes, we do want 16 SGIs */
4328 WARN_ON(nr_irqs != 16);
4330 for (i = 0; i < 16; i++) {
4331 vpe->sgi_config[i].priority = 0;
4332 vpe->sgi_config[i].enabled = false;
4333 vpe->sgi_config[i].group = false;
4335 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4336 &its_sgi_irq_chip, vpe);
4337 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4343 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4345 unsigned int nr_irqs)
4350 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4351 struct irq_data *d, bool reserve)
4353 /* Write out the initial SGI configuration */
4354 its_configure_sgi(d, false);
4358 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4361 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4364 * The VSGI command is awkward:
4366 * - To change the configuration, CLEAR must be set to false,
4367 * leaving the pending bit unchanged.
4368 * - To clear the pending bit, CLEAR must be set to true, leaving
4369 * the configuration unchanged.
4371 * You just can't do both at once, hence the two commands below.
4373 vpe->sgi_config[d->hwirq].enabled = false;
4374 its_configure_sgi(d, false);
4375 its_configure_sgi(d, true);
4378 static const struct irq_domain_ops its_sgi_domain_ops = {
4379 .alloc = its_sgi_irq_domain_alloc,
4380 .free = its_sgi_irq_domain_free,
4381 .activate = its_sgi_irq_domain_activate,
4382 .deactivate = its_sgi_irq_domain_deactivate,
4385 static int its_vpe_id_alloc(void)
4387 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
4390 static void its_vpe_id_free(u16 id)
4392 ida_simple_remove(&its_vpeid_ida, id);
4395 static int its_vpe_init(struct its_vpe *vpe)
4397 struct page *vpt_page;
4400 /* Allocate vpe_id */
4401 vpe_id = its_vpe_id_alloc();
4406 vpt_page = its_allocate_pending_table(GFP_KERNEL);
4408 its_vpe_id_free(vpe_id);
4412 if (!its_alloc_vpe_table(vpe_id)) {
4413 its_vpe_id_free(vpe_id);
4414 its_free_pending_table(vpt_page);
4418 raw_spin_lock_init(&vpe->vpe_lock);
4419 vpe->vpe_id = vpe_id;
4420 vpe->vpt_page = vpt_page;
4421 if (gic_rdists->has_rvpeid)
4422 atomic_set(&vpe->vmapp_count, 0);
4424 vpe->vpe_proxy_event = -1;
4429 static void its_vpe_teardown(struct its_vpe *vpe)
4431 its_vpe_db_proxy_unmap(vpe);
4432 its_vpe_id_free(vpe->vpe_id);
4433 its_free_pending_table(vpe->vpt_page);
4436 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4438 unsigned int nr_irqs)
4440 struct its_vm *vm = domain->host_data;
4443 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4445 for (i = 0; i < nr_irqs; i++) {
4446 struct irq_data *data = irq_domain_get_irq_data(domain,
4448 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4450 BUG_ON(vm != vpe->its_vm);
4452 clear_bit(data->hwirq, vm->db_bitmap);
4453 its_vpe_teardown(vpe);
4454 irq_domain_reset_irq_data(data);
4457 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4458 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4459 its_free_prop_table(vm->vprop_page);
4463 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4464 unsigned int nr_irqs, void *args)
4466 struct irq_chip *irqchip = &its_vpe_irq_chip;
4467 struct its_vm *vm = args;
4468 unsigned long *bitmap;
4469 struct page *vprop_page;
4470 int base, nr_ids, i, err = 0;
4474 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4478 if (nr_ids < nr_irqs) {
4479 its_lpi_free(bitmap, base, nr_ids);
4483 vprop_page = its_allocate_prop_table(GFP_KERNEL);
4485 its_lpi_free(bitmap, base, nr_ids);
4489 vm->db_bitmap = bitmap;
4490 vm->db_lpi_base = base;
4491 vm->nr_db_lpis = nr_ids;
4492 vm->vprop_page = vprop_page;
4494 if (gic_rdists->has_rvpeid)
4495 irqchip = &its_vpe_4_1_irq_chip;
4497 for (i = 0; i < nr_irqs; i++) {
4498 vm->vpes[i]->vpe_db_lpi = base + i;
4499 err = its_vpe_init(vm->vpes[i]);
4502 err = its_irq_gic_domain_alloc(domain, virq + i,
4503 vm->vpes[i]->vpe_db_lpi);
4506 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4507 irqchip, vm->vpes[i]);
4513 its_vpe_irq_domain_free(domain, virq, i);
4515 its_lpi_free(bitmap, base, nr_ids);
4516 its_free_prop_table(vprop_page);
4522 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4523 struct irq_data *d, bool reserve)
4525 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4526 struct its_node *its;
4529 * If we use the list map, we issue VMAPP on demand... Unless
4530 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4531 * so that VSGIs can work.
4533 if (!gic_requires_eager_mapping())
4536 /* Map the VPE to the first possible CPU */
4537 vpe->col_idx = cpumask_first(cpu_online_mask);
4539 list_for_each_entry(its, &its_nodes, entry) {
4543 its_send_vmapp(its, vpe, true);
4544 its_send_vinvall(its, vpe);
4547 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4552 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4555 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4556 struct its_node *its;
4559 * If we use the list map on GICv4.0, we unmap the VPE once no
4560 * VLPIs are associated with the VM.
4562 if (!gic_requires_eager_mapping())
4565 list_for_each_entry(its, &its_nodes, entry) {
4569 its_send_vmapp(its, vpe, false);
4573 static const struct irq_domain_ops its_vpe_domain_ops = {
4574 .alloc = its_vpe_irq_domain_alloc,
4575 .free = its_vpe_irq_domain_free,
4576 .activate = its_vpe_irq_domain_activate,
4577 .deactivate = its_vpe_irq_domain_deactivate,
4580 static int its_force_quiescent(void __iomem *base)
4582 u32 count = 1000000; /* 1s */
4585 val = readl_relaxed(base + GITS_CTLR);
4587 * GIC architecture specification requires the ITS to be both
4588 * disabled and quiescent for writes to GITS_BASER<n> or
4589 * GITS_CBASER to not have UNPREDICTABLE results.
4591 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4594 /* Disable the generation of all interrupts to this ITS */
4595 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4596 writel_relaxed(val, base + GITS_CTLR);
4598 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4600 val = readl_relaxed(base + GITS_CTLR);
4601 if (val & GITS_CTLR_QUIESCENT)
4613 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4615 struct its_node *its = data;
4617 /* erratum 22375: only alloc 8MB table size (20 bits) */
4618 its->typer &= ~GITS_TYPER_DEVBITS;
4619 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4620 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4625 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4627 struct its_node *its = data;
4629 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4634 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4636 struct its_node *its = data;
4638 /* On QDF2400, the size of the ITE is 16 bytes */
4639 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4640 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4645 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4647 struct its_node *its = its_dev->its;
4650 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4651 * which maps 32-bit writes targeted at a separate window of
4652 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4653 * with device ID taken from bits [device_id_bits + 1:2] of
4654 * the window offset.
4656 return its->pre_its_base + (its_dev->device_id << 2);
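/*
 * Example (illustrative): device ID 0x10 gets doorbell address
 * pre_its_base + 0x40; the pre-ITS recovers the ID from bits
 * [device_id_bits + 1:2] of that window offset.
 */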
4659 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4661 struct its_node *its = data;
4662 u32 pre_its_window[2];
4665 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4666 "socionext,synquacer-pre-its",
4668 ARRAY_SIZE(pre_its_window))) {
4670 its->pre_its_base = pre_its_window[0];
4671 its->get_msi_base = its_irq_get_msi_base_pre_its;
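/*
 * The window provides 4 bytes of doorbell space per device ID, so
 * the usable ID width is log2(window size) - 2 bits.
 */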
4673 ids = ilog2(pre_its_window[1]) - 2;
4674 if (device_ids(its) > ids) {
4675 its->typer &= ~GITS_TYPER_DEVBITS;
4676 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4679 /* the pre-ITS breaks isolation, so disable MSI remapping */
4680 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
4686 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4688 struct its_node *its = data;
4691 * Hip07 insists on using the wrong address for the VLPI
4692 * page. Trick it into doing the right thing...
4694 its->vlpi_redist_offset = SZ_128K;
4698 static const struct gic_quirk its_quirks[] = {
4699 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4701 .desc = "ITS: Cavium errata 22375, 24313",
4702 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4704 .init = its_enable_quirk_cavium_22375,
4707 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4709 .desc = "ITS: Cavium erratum 23144",
4710 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4712 .init = its_enable_quirk_cavium_23144,
4715 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4717 .desc = "ITS: QDF2400 erratum 0065",
4718 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4720 .init = its_enable_quirk_qdf2400_e0065,
4723 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4726 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4727 * implementation, but with a 'pre-ITS' added that requires
4728 * special handling in software.
4730 .desc = "ITS: Socionext Synquacer pre-ITS",
4733 .init = its_enable_quirk_socionext_synquacer,
4736 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4738 .desc = "ITS: Hip07 erratum 161600802",
4741 .init = its_enable_quirk_hip07_161600802,
4748 static void its_enable_quirks(struct its_node *its)
4750 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4752 gic_enable_quirks(iidr, its_quirks, its);
4755 static int its_save_disable(void)
4757 struct its_node *its;
4760 raw_spin_lock(&its_lock);
4761 list_for_each_entry(its, &its_nodes, entry) {
4765 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4766 err = its_force_quiescent(base);
4768 pr_err("ITS@%pa: failed to quiesce: %d\n",
4769 &its->phys_base, err);
4770 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4774 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4779 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4783 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4786 raw_spin_unlock(&its_lock);
4791 static void its_restore_enable(void)
4793 struct its_node *its;
4796 raw_spin_lock(&its_lock);
4797 list_for_each_entry(its, &its_nodes, entry) {
4804 * Make sure that the ITS is disabled. If it fails to quiesce,
4805 * don't restore it since writing to CBASER or BASER<n>
4806 * registers is undefined according to the GICv3 ITS specification.
4809 * Firmware resuming with the ITS enabled is terminally broken.
4811 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
4812 ret = its_force_quiescent(base);
4814 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4815 &its->phys_base, ret);
4819 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4822 * Writing CBASER resets CREADR to 0, so make CWRITER and
4823 * cmd_write line up with it.
4825 its->cmd_write = its->cmd_base;
4826 gits_write_cwriter(0, base + GITS_CWRITER);
4828 /* Restore GITS_BASER from the value cache. */
4829 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4830 struct its_baser *baser = &its->tables[i];
4832 if (!(baser->val & GITS_BASER_VALID))
4835 its_write_baser(its, baser, baser->val);
4837 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4840 * Reinit the collection if it's stored in the ITS. This is
4841 * indicated by the col_id being less than the HCC field.
4842 * CID < HCC, as specified in the GICv3 documentation.
4844 if (its->collections[smp_processor_id()].col_id <
4845 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4846 its_cpu_init_collection(its);
4848 raw_spin_unlock(&its_lock);
static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};
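/*
 * Syscore callbacks run late in suspend and early in resume, on a
 * single CPU with interrupts disabled. That is what makes it safe for
 * its_save_disable() and its_restore_enable() to walk its_nodes under
 * a plain raw spinlock and to use smp_processor_id() when poking the
 * per-CPU collections.
 */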
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;
	return 0;
}
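/*
 * The domain created here is the ITS "nexus" level of the stacked
 * irqdomain hierarchy; device-facing MSI domains (PCI/MSI,
 * platform-MSI) are built on top of it by the MSI glue code, roughly:
 *
 *	PCI/MSI domain -> ITS domain (DOMAIN_BUS_NEXUS) -> GICv3 domain
 *
 * its_parent is the GICv3 domain handed to its_init().
 */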
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(device_ids(its) - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);
	return 0;
}
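/*
 * Without DirectLPI, VPE doorbell invalidation is funnelled through
 * the proxy device set up above: one event slot per possible CPU,
 * rounded up to a power of two since ITTs come in power-of-two sizes
 * (on a 6-CPU system, roundup_pow_of_two(6) = 8 slots). next_victim
 * implements round-robin eviction when slots need to be recycled.
 */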
static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address it.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}
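/*
 * The write/read-back of GITS_CTLR above copes with implementations
 * whose ITS_NUMBER field is preprogrammed and read-only: if our value
 * didn't stick, we adopt whatever the hardware reports and only then
 * claim that slot in its_list_map, failing on a duplicate.
 */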
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u64 baser, tmp, typer;
	struct page *page;
	u32 val, ctlr;
	int err;

	its_base = ioremap(res->start, SZ_64K);
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->typer = typer;
	its->base = its_base;
	its->phys_base = res->start;

	if (is_v4(its)) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;
			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}

		if (is_v4_1(its)) {
			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);

			its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
			if (!its->sgir_base) {
				err = -ENOMEM;
				goto out_free_its;
			}

			its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
				&res->start, its->mpidr, svpet);
		}
	}

	its->numa_node = numa_node;

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_unmap_sgir;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);
	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}
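/*
 * GITS_CBASER encodes the command queue size as a number of 4KB pages
 * minus one: with ITS_CMD_QUEUE_SZ = 64KB the size field is
 * 64K / 4K - 1 = 15. If the shareability bits read back as
 * non-shareable, the queue is downgraded to non-cacheable and every
 * command write must be followed by an explicit cache flush, hence
 * ITS_FLAGS_CMDQ_NEEDS_FLUSHING.
 */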
static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if (gic_data_rdist()->lpi_enabled ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}
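/*
 * The RWP poll above budgets roughly one second: timeout starts at
 * USEC_PER_SEC and each iteration pairs a udelay(1) with a decrement,
 * giving the redistributor about 10^6 microseconds to acknowledge the
 * disable before we give up.
 */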
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret = redist_disable_lpis();

		if (ret)
			return ret;
		its_cpu_init_lpis();
		its_cpu_init_collections();
	}
	return 0;
}
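/*
 * its_cpu_init() is called once per CPU by the GICv3 driver, at boot
 * and again on hotplug, so redist_disable_lpis() must cope with a
 * redistributor whose LPIs were left enabled by firmware or by a
 * previous kernel (e.g. kexec).
 */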
static const struct of_device_id its_device_id[] = {
	{ .compatible = "arm,gic-v3-its", },
	{},
};
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}
		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}
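/*
 * The loop above binds to device tree nodes of the following shape
 * (the unit address here is illustrative only):
 *
 *	its: msi-controller@2f020000 {
 *		compatible = "arm,gic-v3-its";
 *		msi-controller;
 *		reg = <0x0 0x2f020000 0x0 0x20000>;
 *	};
 *
 * Nodes lacking the "msi-controller" property are skipped with a
 * warning rather than probed.
 */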
#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE		(SZ_128K)
#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}
static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}
static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	/*
	 * Note that in theory a new proximity node could be created by this
	 * entry as it is an SRAT resource allocation structure.
	 * We do not currently support doing so.
	 */
	node = pxm_to_node(its_affinity->proximity_domain);
	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);
	return 0;
}
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}
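/*
 * SRAT is walked twice: the first pass uses the empty match callback
 * purely to count ITS affinity entries (acpi_table_parse_entries()
 * returns the number of matches), the second pass fills the freshly
 * allocated its_srat_maps[] via gic_acpi_parse_srat_its().
 */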
/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif /* CONFIG_ACPI_NUMA */
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}
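/*
 * Each MADT generic translator entry describes one ITS: its 128KB
 * register frame becomes a resource, a software fwnode stands in for
 * the missing firmware node, and the translation_id is registered
 * with IORT so that PCI devices can later be routed to this ITS.
 */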
static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	gic_rdists = rdists;
	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

	if (has_v4 && rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);
	return 0;
}