1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitmap.h>
10 #include <linux/cpu.h>
11 #include <linux/crash_dump.h>
12 #include <linux/delay.h>
13 #include <linux/dma-iommu.h>
14 #include <linux/efi.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/list.h>
18 #include <linux/log2.h>
19 #include <linux/memblock.h>
21 #include <linux/msi.h>
23 #include <linux/of_address.h>
24 #include <linux/of_irq.h>
25 #include <linux/of_pci.h>
26 #include <linux/of_platform.h>
27 #include <linux/percpu.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
31 #include <linux/irqchip.h>
32 #include <linux/irqchip/arm-gic-v3.h>
33 #include <linux/irqchip/arm-gic-v4.h>
35 #include <asm/cputype.h>
36 #include <asm/exception.h>
38 #include "irq-gic-common.h"
40 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
41 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
42 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
44 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
45 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
47 static u32 lpi_id_bits;
50 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
51 * deal with (one configuration byte per interrupt). PENDBASE has to
52 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
54 #define LPI_NRBITS lpi_id_bits
55 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
56 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
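/*
 * Illustrative sizing, assuming lpi_id_bits == 16: PROPBASE needs one
 * byte per LPI, i.e. 64KB, and PENDBASE needs one bit per interrupt,
 * i.e. 8KB, which the 64KB alignment then rounds up to a full 64KB.
 */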
58 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
61 * Collection structure - just an ID, and a redistributor address to
62 * ping. We use one per CPU as a bag of interrupts assigned to this CPU.
65 struct its_collection {
71 * The ITS_BASER structure - contains memory information, cached
72 * value of BASER register configuration and ITS page size.
84 * The ITS structure - contains most of the infrastructure, with the
85 * top-level MSI domain, the command queue, the collections, and the
86 * list of devices writing to it.
88 * dev_alloc_lock has to be taken for device allocations, while the
89 * spinlock must be taken to parse data structures such as the device list.
94 struct mutex dev_alloc_lock;
95 struct list_head entry;
97 phys_addr_t phys_base;
98 struct its_cmd_block *cmd_base;
99 struct its_cmd_block *cmd_write;
100 struct its_baser tables[GITS_BASER_NR_REGS];
101 struct its_collection *collections;
102 struct fwnode_handle *fwnode_handle;
103 u64 (*get_msi_base)(struct its_device *its_dev);
106 struct list_head its_device_list;
108 unsigned long list_nr;
112 unsigned int msi_domain_flags;
113 u32 pre_its_base; /* for Socionext Synquacer */
115 int vlpi_redist_offset;
118 #define ITS_ITT_ALIGN SZ_256
120 /* The maximum number of VPEID bits supported by VLPI commands */
121 #define ITS_MAX_VPEID_BITS (16)
122 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
124 /* Convert page order to size in bytes */
125 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
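/* e.g. with 4KB kernel pages, PAGE_ORDER_TO_SIZE(4) = 4KB << 4 = 64KB */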
127 struct event_lpi_map {
128 unsigned long *lpi_map;
130 irq_hw_number_t lpi_base;
132 struct mutex vlpi_lock;
134 struct its_vlpi_map *vlpi_maps;
139 * The ITS view of a device - belongs to an ITS, owns an interrupt
140 * translation table, and a list of interrupts. If some of its
141 * LPIs are injected into a guest (GICv4), the event_map.vm field
142 * indicates which one.
145 struct list_head entry;
146 struct its_node *its;
147 struct event_lpi_map event_map;
156 struct its_device *dev;
157 struct its_vpe **vpes;
161 static LIST_HEAD(its_nodes);
162 static DEFINE_RAW_SPINLOCK(its_lock);
163 static struct rdists *gic_rdists;
164 static struct irq_domain *its_parent;
166 static unsigned long its_list_map;
167 static u16 vmovp_seq_num;
168 static DEFINE_RAW_SPINLOCK(vmovp_lock);
170 static DEFINE_IDA(its_vpeid_ida);
172 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
173 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
174 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
175 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
177 static u16 get_its_list(struct its_vm *vm)
179 struct its_node *its;
180 unsigned long its_list = 0;
182 list_for_each_entry(its, &its_nodes, entry) {
186 if (vm->vlpi_count[its->list_nr])
187 __set_bit(its->list_nr, &its_list);
190 return (u16)its_list;
193 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
196 struct its_node *its = its_dev->its;
198 return its->collections + its_dev->event_map.col_map[event];
201 static struct its_collection *valid_col(struct its_collection *col)
203 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
209 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
211 if (valid_col(its->collections + vpe->col_idx))
218 * ITS command descriptors - parameters to be encoded in a command block.
221 struct its_cmd_desc {
224 struct its_device *dev;
229 struct its_device *dev;
234 struct its_device *dev;
239 struct its_device *dev;
244 struct its_collection *col;
249 struct its_device *dev;
255 struct its_device *dev;
256 struct its_collection *col;
261 struct its_device *dev;
266 struct its_collection *col;
275 struct its_collection *col;
281 struct its_device *dev;
289 struct its_device *dev;
296 struct its_collection *col;
304 * The ITS command block, which is what the ITS actually parses.
306 struct its_cmd_block {
310 #define ITS_CMD_QUEUE_SZ SZ_64K
311 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
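/*
 * For reference: each command is four 64-bit doublewords (32 bytes), so
 * the 64KB queue holds 64K / 32 = 2048 command slots.
 */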
313 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
314 struct its_cmd_block *,
315 struct its_cmd_desc *);
317 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
318 struct its_cmd_block *,
319 struct its_cmd_desc *);
321 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
323 u64 mask = GENMASK_ULL(h, l);
325 *raw_cmd |= (val << l) & mask;
328 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
330 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
333 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
335 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
338 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
340 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
343 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
345 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
348 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
350 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
353 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
355 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
358 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
360 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
363 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
365 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
368 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
370 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
373 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
375 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
378 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
380 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
383 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
385 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
388 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
390 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
393 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
395 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
398 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
400 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
403 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
405 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
408 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
410 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
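/*
 * Taken together, the helpers above assemble a command in place. As an
 * example derived from the builders below, a MAPTI ends up laid out as:
 *   DW0: command number [7:0],  DeviceID [63:32]
 *   DW1: EventID [31:0],        physical INTID [63:32]
 *   DW2: collection ID [15:0]
 */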
413 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
415 /* Let's fixup BE commands */
416 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
417 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
418 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
419 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
422 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
423 struct its_cmd_block *cmd,
424 struct its_cmd_desc *desc)
426 unsigned long itt_addr;
427 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
429 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
430 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
432 its_encode_cmd(cmd, GITS_CMD_MAPD);
433 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
434 its_encode_size(cmd, size - 1);
435 its_encode_itt(cmd, itt_addr);
436 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
443 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
444 struct its_cmd_block *cmd,
445 struct its_cmd_desc *desc)
447 its_encode_cmd(cmd, GITS_CMD_MAPC);
448 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
449 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
450 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
454 return desc->its_mapc_cmd.col;
457 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
458 struct its_cmd_block *cmd,
459 struct its_cmd_desc *desc)
461 struct its_collection *col;
463 col = dev_event_to_col(desc->its_mapti_cmd.dev,
464 desc->its_mapti_cmd.event_id);
466 its_encode_cmd(cmd, GITS_CMD_MAPTI);
467 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
468 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
469 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
470 its_encode_collection(cmd, col->col_id);
474 return valid_col(col);
477 static struct its_collection *its_build_movi_cmd(struct its_node *its,
478 struct its_cmd_block *cmd,
479 struct its_cmd_desc *desc)
481 struct its_collection *col;
483 col = dev_event_to_col(desc->its_movi_cmd.dev,
484 desc->its_movi_cmd.event_id);
486 its_encode_cmd(cmd, GITS_CMD_MOVI);
487 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
488 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
489 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
493 return valid_col(col);
496 static struct its_collection *its_build_discard_cmd(struct its_node *its,
497 struct its_cmd_block *cmd,
498 struct its_cmd_desc *desc)
500 struct its_collection *col;
502 col = dev_event_to_col(desc->its_discard_cmd.dev,
503 desc->its_discard_cmd.event_id);
505 its_encode_cmd(cmd, GITS_CMD_DISCARD);
506 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
507 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
511 return valid_col(col);
514 static struct its_collection *its_build_inv_cmd(struct its_node *its,
515 struct its_cmd_block *cmd,
516 struct its_cmd_desc *desc)
518 struct its_collection *col;
520 col = dev_event_to_col(desc->its_inv_cmd.dev,
521 desc->its_inv_cmd.event_id);
523 its_encode_cmd(cmd, GITS_CMD_INV);
524 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
525 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
529 return valid_col(col);
532 static struct its_collection *its_build_int_cmd(struct its_node *its,
533 struct its_cmd_block *cmd,
534 struct its_cmd_desc *desc)
536 struct its_collection *col;
538 col = dev_event_to_col(desc->its_int_cmd.dev,
539 desc->its_int_cmd.event_id);
541 its_encode_cmd(cmd, GITS_CMD_INT);
542 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
543 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
547 return valid_col(col);
550 static struct its_collection *its_build_clear_cmd(struct its_node *its,
551 struct its_cmd_block *cmd,
552 struct its_cmd_desc *desc)
554 struct its_collection *col;
556 col = dev_event_to_col(desc->its_clear_cmd.dev,
557 desc->its_clear_cmd.event_id);
559 its_encode_cmd(cmd, GITS_CMD_CLEAR);
560 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
561 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
565 return valid_col(col);
568 static struct its_collection *its_build_invall_cmd(struct its_node *its,
569 struct its_cmd_block *cmd,
570 struct its_cmd_desc *desc)
572 its_encode_cmd(cmd, GITS_CMD_INVALL);
573 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
577 return desc->its_invall_cmd.col;
580 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
581 struct its_cmd_block *cmd,
582 struct its_cmd_desc *desc)
584 its_encode_cmd(cmd, GITS_CMD_VINVALL);
585 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
589 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
592 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
593 struct its_cmd_block *cmd,
594 struct its_cmd_desc *desc)
596 unsigned long vpt_addr;
599 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
600 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
602 its_encode_cmd(cmd, GITS_CMD_VMAPP);
603 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
604 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
605 its_encode_target(cmd, target);
606 its_encode_vpt_addr(cmd, vpt_addr);
607 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
611 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
614 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
615 struct its_cmd_block *cmd,
616 struct its_cmd_desc *desc)
620 if (desc->its_vmapti_cmd.db_enabled)
621 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
625 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
626 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
627 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
628 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
629 its_encode_db_phys_id(cmd, db);
630 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
634 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
637 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
638 struct its_cmd_block *cmd,
639 struct its_cmd_desc *desc)
643 if (desc->its_vmovi_cmd.db_enabled)
644 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
648 its_encode_cmd(cmd, GITS_CMD_VMOVI);
649 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
650 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
651 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
652 its_encode_db_phys_id(cmd, db);
653 its_encode_db_valid(cmd, true);
657 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
660 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
661 struct its_cmd_block *cmd,
662 struct its_cmd_desc *desc)
666 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
667 its_encode_cmd(cmd, GITS_CMD_VMOVP);
668 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
669 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
670 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
671 its_encode_target(cmd, target);
675 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
678 static u64 its_cmd_ptr_to_offset(struct its_node *its,
679 struct its_cmd_block *ptr)
681 return (ptr - its->cmd_base) * sizeof(*ptr);
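/*
 * The resulting byte offset is what gets written to GITS_CWRITER and
 * compared against GITS_CREADR below; e.g. the sixth queue entry sits
 * at offset 5 * sizeof(struct its_cmd_block) = 160.
 */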
684 static int its_queue_full(struct its_node *its)
689 widx = its->cmd_write - its->cmd_base;
690 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
692 /* This is incredibly unlikely to happen, unless the ITS locks up. */
693 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
699 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
701 struct its_cmd_block *cmd;
702 u32 count = 1000000; /* 1s! */
704 while (its_queue_full(its)) {
707 pr_err_ratelimited("ITS queue not draining\n");
714 cmd = its->cmd_write++;
716 /* Handle queue wrapping */
717 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
718 its->cmd_write = its->cmd_base;
729 static struct its_cmd_block *its_post_commands(struct its_node *its)
731 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
733 writel_relaxed(wr, its->base + GITS_CWRITER);
735 return its->cmd_write;
738 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
741 * Make sure the commands written to memory are observable by
744 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
745 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
750 static int its_wait_for_range_completion(struct its_node *its,
752 struct its_cmd_block *to)
754 u64 rd_idx, to_idx, linear_idx;
755 u32 count = 1000000; /* 1s! */
757 /* Linearize to_idx if the command set has wrapped around */
758 to_idx = its_cmd_ptr_to_offset(its, to);
759 if (to_idx < prev_idx)
760 to_idx += ITS_CMD_QUEUE_SZ;
762 linear_idx = prev_idx;
767 rd_idx = readl_relaxed(its->base + GITS_CREADR);
770 * Compute the read pointer progress, taking the
771 * potential wrap-around into account.
773 delta = rd_idx - prev_idx;
774 if (rd_idx < prev_idx)
775 delta += ITS_CMD_QUEUE_SZ;
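/*
 * Worked example: with prev_idx = 0xffe0 and a wrapped rd_idx = 0x0040,
 * delta = 0x0040 - 0xffe0 + 0x10000 = 0x60, i.e. three 32-byte commands
 * of progress.
 */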
778 if (linear_idx >= to_idx)
783 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
795 /* Warning, macro hell follows */
796 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
797 void name(struct its_node *its, \
799 struct its_cmd_desc *desc) \
801 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
802 synctype *sync_obj; \
803 unsigned long flags; \
806 raw_spin_lock_irqsave(&its->lock, flags); \
808 cmd = its_allocate_entry(its); \
809 if (!cmd) { /* We're soooooo screwed... */ \
810 raw_spin_unlock_irqrestore(&its->lock, flags); \
813 sync_obj = builder(its, cmd, desc); \
814 its_flush_cmd(its, cmd); \
817 sync_cmd = its_allocate_entry(its); \
821 buildfn(its, sync_cmd, sync_obj); \
822 its_flush_cmd(its, sync_cmd); \
826 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
827 next_cmd = its_post_commands(its); \
828 raw_spin_unlock_irqrestore(&its->lock, flags); \
830 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
831 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
834 static void its_build_sync_cmd(struct its_node *its,
835 struct its_cmd_block *sync_cmd,
836 struct its_collection *sync_col)
838 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
839 its_encode_target(sync_cmd, sync_col->target_address);
841 its_fixup_cmd(sync_cmd);
844 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
845 struct its_collection, its_build_sync_cmd)
847 static void its_build_vsync_cmd(struct its_node *its,
848 struct its_cmd_block *sync_cmd,
849 struct its_vpe *sync_vpe)
851 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
852 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
854 its_fixup_cmd(sync_cmd);
857 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
858 struct its_vpe, its_build_vsync_cmd)
860 static void its_send_int(struct its_device *dev, u32 event_id)
862 struct its_cmd_desc desc;
864 desc.its_int_cmd.dev = dev;
865 desc.its_int_cmd.event_id = event_id;
867 its_send_single_command(dev->its, its_build_int_cmd, &desc);
870 static void its_send_clear(struct its_device *dev, u32 event_id)
872 struct its_cmd_desc desc;
874 desc.its_clear_cmd.dev = dev;
875 desc.its_clear_cmd.event_id = event_id;
877 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
880 static void its_send_inv(struct its_device *dev, u32 event_id)
882 struct its_cmd_desc desc;
884 desc.its_inv_cmd.dev = dev;
885 desc.its_inv_cmd.event_id = event_id;
887 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
890 static void its_send_mapd(struct its_device *dev, int valid)
892 struct its_cmd_desc desc;
894 desc.its_mapd_cmd.dev = dev;
895 desc.its_mapd_cmd.valid = !!valid;
897 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
900 static void its_send_mapc(struct its_node *its, struct its_collection *col,
903 struct its_cmd_desc desc;
905 desc.its_mapc_cmd.col = col;
906 desc.its_mapc_cmd.valid = !!valid;
908 its_send_single_command(its, its_build_mapc_cmd, &desc);
911 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
913 struct its_cmd_desc desc;
915 desc.its_mapti_cmd.dev = dev;
916 desc.its_mapti_cmd.phys_id = irq_id;
917 desc.its_mapti_cmd.event_id = id;
919 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
922 static void its_send_movi(struct its_device *dev,
923 struct its_collection *col, u32 id)
925 struct its_cmd_desc desc;
927 desc.its_movi_cmd.dev = dev;
928 desc.its_movi_cmd.col = col;
929 desc.its_movi_cmd.event_id = id;
931 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
934 static void its_send_discard(struct its_device *dev, u32 id)
936 struct its_cmd_desc desc;
938 desc.its_discard_cmd.dev = dev;
939 desc.its_discard_cmd.event_id = id;
941 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
944 static void its_send_invall(struct its_node *its, struct its_collection *col)
946 struct its_cmd_desc desc;
948 desc.its_invall_cmd.col = col;
950 its_send_single_command(its, its_build_invall_cmd, &desc);
953 static void its_send_vmapti(struct its_device *dev, u32 id)
955 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
956 struct its_cmd_desc desc;
958 desc.its_vmapti_cmd.vpe = map->vpe;
959 desc.its_vmapti_cmd.dev = dev;
960 desc.its_vmapti_cmd.virt_id = map->vintid;
961 desc.its_vmapti_cmd.event_id = id;
962 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
964 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
967 static void its_send_vmovi(struct its_device *dev, u32 id)
969 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
970 struct its_cmd_desc desc;
972 desc.its_vmovi_cmd.vpe = map->vpe;
973 desc.its_vmovi_cmd.dev = dev;
974 desc.its_vmovi_cmd.event_id = id;
975 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
977 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
980 static void its_send_vmapp(struct its_node *its,
981 struct its_vpe *vpe, bool valid)
983 struct its_cmd_desc desc;
985 desc.its_vmapp_cmd.vpe = vpe;
986 desc.its_vmapp_cmd.valid = valid;
987 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
989 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
992 static void its_send_vmovp(struct its_vpe *vpe)
994 struct its_cmd_desc desc = {};
995 struct its_node *its;
997 int col_id = vpe->col_idx;
999 desc.its_vmovp_cmd.vpe = vpe;
1001 if (!its_list_map) {
1002 its = list_first_entry(&its_nodes, struct its_node, entry);
1003 desc.its_vmovp_cmd.col = &its->collections[col_id];
1004 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1009 * Yet another marvel of the architecture. If using the
1010 * its_list "feature", we need to make sure that all ITSs
1011 * receive all VMOVP commands in the same order. The only way
1012 * to guarantee this is to make vmovp a serialization point.
1016 raw_spin_lock_irqsave(&vmovp_lock, flags);
1018 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1019 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1022 list_for_each_entry(its, &its_nodes, entry) {
1026 if (!vpe->its_vm->vlpi_count[its->list_nr])
1029 desc.its_vmovp_cmd.col = &its->collections[col_id];
1030 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1033 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1036 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1038 struct its_cmd_desc desc;
1040 desc.its_vinvall_cmd.vpe = vpe;
1041 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1045 * irqchip functions - assumes MSI, mostly.
1048 static inline u32 its_get_event_id(struct irq_data *d)
1050 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1051 return d->hwirq - its_dev->event_map.lpi_base;
1054 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1056 irq_hw_number_t hwirq;
1060 if (irqd_is_forwarded_to_vcpu(d)) {
1061 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1062 u32 event = its_get_event_id(d);
1063 struct its_vlpi_map *map;
1065 va = page_address(its_dev->event_map.vm->vprop_page);
1066 map = &its_dev->event_map.vlpi_maps[event];
1067 hwirq = map->vintid;
1069 /* Remember the updated property */
1070 map->properties &= ~clr;
1071 map->properties |= set | LPI_PROP_GROUP1;
1073 va = gic_rdists->prop_table_va;
1077 cfg = va + hwirq - 8192;
1079 *cfg |= set | LPI_PROP_GROUP1;
1082 * Make the above write visible to the redistributors.
1083 * And yes, we're flushing exactly: One. Single. Byte.
1086 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1087 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1092 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1094 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1096 lpi_write_config(d, clr, set);
1097 its_send_inv(its_dev, its_get_event_id(d));
1100 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1102 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1103 u32 event = its_get_event_id(d);
1105 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1108 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1111 * More fun with the architecture:
1113 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1114 * value or to 1023, depending on the enable bit. But that
1115 * would be issuing a mapping for an /existing/ DevID+EventID
1116 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1117 * to the /same/ vPE, using this opportunity to adjust the
1118 * doorbell. Mouahahahaha. We loves it, Precious.
1120 its_send_vmovi(its_dev, event);
1123 static void its_mask_irq(struct irq_data *d)
1125 if (irqd_is_forwarded_to_vcpu(d))
1126 its_vlpi_set_doorbell(d, false);
1128 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1131 static void its_unmask_irq(struct irq_data *d)
1133 if (irqd_is_forwarded_to_vcpu(d))
1134 its_vlpi_set_doorbell(d, true);
1136 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1139 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1143 const struct cpumask *cpu_mask = cpu_online_mask;
1144 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1145 struct its_collection *target_col;
1146 u32 id = its_get_event_id(d);
1148 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1149 if (irqd_is_forwarded_to_vcpu(d))
1152 /* lpi cannot be routed to a redistributor that is on a foreign node */
1153 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1154 if (its_dev->its->numa_node >= 0) {
1155 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1156 if (!cpumask_intersects(mask_val, cpu_mask))
1161 cpu = cpumask_any_and(mask_val, cpu_mask);
1163 if (cpu >= nr_cpu_ids)
1166 /* only issue a MOVI if the target cpu differs from the current one */
1167 if (cpu != its_dev->event_map.col_map[id]) {
1168 target_col = &its_dev->its->collections[cpu];
1169 its_send_movi(its_dev, target_col, id);
1170 its_dev->event_map.col_map[id] = cpu;
1171 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1174 return IRQ_SET_MASK_OK_DONE;
1177 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1179 struct its_node *its = its_dev->its;
1181 return its->phys_base + GITS_TRANSLATER;
1184 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1186 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1187 struct its_node *its;
1191 addr = its->get_msi_base(its_dev);
1193 msg->address_lo = lower_32_bits(addr);
1194 msg->address_hi = upper_32_bits(addr);
1195 msg->data = its_get_event_id(d);
1197 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1200 static int its_irq_set_irqchip_state(struct irq_data *d,
1201 enum irqchip_irq_state which,
1204 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1205 u32 event = its_get_event_id(d);
1207 if (which != IRQCHIP_STATE_PENDING)
1211 its_send_int(its_dev, event);
1213 its_send_clear(its_dev, event);
1218 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1220 unsigned long flags;
1222 /* Not using the ITS list? Everything is always mapped. */
1226 raw_spin_lock_irqsave(&vmovp_lock, flags);
1229 * If the VM wasn't mapped yet, iterate over the vpes and get them mapped now.
1232 vm->vlpi_count[its->list_nr]++;
1234 if (vm->vlpi_count[its->list_nr] == 1) {
1237 for (i = 0; i < vm->nr_vpes; i++) {
1238 struct its_vpe *vpe = vm->vpes[i];
1239 struct irq_data *d = irq_get_irq_data(vpe->irq);
1241 /* Map the VPE to the first possible CPU */
1242 vpe->col_idx = cpumask_first(cpu_online_mask);
1243 its_send_vmapp(its, vpe, true);
1244 its_send_vinvall(its, vpe);
1245 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1249 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1252 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1254 unsigned long flags;
1256 /* Not using the ITS list? Everything is always mapped. */
1260 raw_spin_lock_irqsave(&vmovp_lock, flags);
1262 if (!--vm->vlpi_count[its->list_nr]) {
1265 for (i = 0; i < vm->nr_vpes; i++)
1266 its_send_vmapp(its, vm->vpes[i], false);
1269 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1272 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1274 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1275 u32 event = its_get_event_id(d);
1281 mutex_lock(&its_dev->event_map.vlpi_lock);
1283 if (!its_dev->event_map.vm) {
1284 struct its_vlpi_map *maps;
1286 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1293 its_dev->event_map.vm = info->map->vm;
1294 its_dev->event_map.vlpi_maps = maps;
1295 } else if (its_dev->event_map.vm != info->map->vm) {
1300 /* Get our private copy of the mapping information */
1301 its_dev->event_map.vlpi_maps[event] = *info->map;
1303 if (irqd_is_forwarded_to_vcpu(d)) {
1304 /* Already mapped, move it around */
1305 its_send_vmovi(its_dev, event);
1307 /* Ensure all the VPEs are mapped on this ITS */
1308 its_map_vm(its_dev->its, info->map->vm);
1311 * Flag the interrupt as forwarded so that we can
1312 * start poking the virtual property table.
1314 irqd_set_forwarded_to_vcpu(d);
1316 /* Write out the property to the prop table */
1317 lpi_write_config(d, 0xff, info->map->properties);
1319 /* Drop the physical mapping */
1320 its_send_discard(its_dev, event);
1322 /* and install the virtual one */
1323 its_send_vmapti(its_dev, event);
1325 /* Increment the number of VLPIs */
1326 its_dev->event_map.nr_vlpis++;
1330 mutex_unlock(&its_dev->event_map.vlpi_lock);
1334 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1336 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1337 u32 event = its_get_event_id(d);
1340 mutex_lock(&its_dev->event_map.vlpi_lock);
1342 if (!its_dev->event_map.vm ||
1343 !its_dev->event_map.vlpi_maps[event].vm) {
1348 /* Copy our mapping information to the incoming request */
1349 *info->map = its_dev->event_map.vlpi_maps[event];
1352 mutex_unlock(&its_dev->event_map.vlpi_lock);
1356 static int its_vlpi_unmap(struct irq_data *d)
1358 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1359 u32 event = its_get_event_id(d);
1362 mutex_lock(&its_dev->event_map.vlpi_lock);
1364 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1369 /* Drop the virtual mapping */
1370 its_send_discard(its_dev, event);
1372 /* and restore the physical one */
1373 irqd_clr_forwarded_to_vcpu(d);
1374 its_send_mapti(its_dev, d->hwirq, event);
1375 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1379 /* Potentially unmap the VM from this ITS */
1380 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1383 * Drop the refcount and make the device available again if
1384 * this was the last VLPI.
1386 if (!--its_dev->event_map.nr_vlpis) {
1387 its_dev->event_map.vm = NULL;
1388 kfree(its_dev->event_map.vlpi_maps);
1392 mutex_unlock(&its_dev->event_map.vlpi_lock);
1396 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1398 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1400 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1403 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1404 lpi_update_config(d, 0xff, info->config);
1406 lpi_write_config(d, 0xff, info->config);
1407 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1412 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1414 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1415 struct its_cmd_info *info = vcpu_info;
1418 if (!its_dev->its->is_v4)
1421 /* Unmap request? */
1423 return its_vlpi_unmap(d);
1425 switch (info->cmd_type) {
1427 return its_vlpi_map(d, info);
1430 return its_vlpi_get(d, info);
1432 case PROP_UPDATE_VLPI:
1433 case PROP_UPDATE_AND_INV_VLPI:
1434 return its_vlpi_prop_update(d, info);
1441 static struct irq_chip its_irq_chip = {
1443 .irq_mask = its_mask_irq,
1444 .irq_unmask = its_unmask_irq,
1445 .irq_eoi = irq_chip_eoi_parent,
1446 .irq_set_affinity = its_set_affinity,
1447 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1448 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1449 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1454 * How we allocate LPIs:
1456 * lpi_range_list contains ranges of LPIs that are available to
1457 * allocate from. To allocate LPIs, just pick the first range that
1458 * fits the required allocation, and reduce it by the required
1459 * amount. Once empty, remove the range from the list.
1461 * To free a range of LPIs, add a free range to the list, sort it and
1462 * merge the result if the new range happens to be adjacent to an
1463 * already free block.
1465 * The consequence of the above is that allocation cost is low, but
1466 * freeing is expensive. We assume that freeing rarely occurs.
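/*
 * Illustration (hypothetical numbers): starting from a single free
 * range [8192, 65535], allocating 32 LPIs returns base 8192 and shrinks
 * the range to [8224, 65535]; freeing those 32 LPIs later re-inserts
 * [8192, 8223] and merges it back into the adjacent free range.
 */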
1468 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1470 static DEFINE_MUTEX(lpi_range_lock);
1471 static LIST_HEAD(lpi_range_list);
1474 struct list_head entry;
1479 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1481 struct lpi_range *range;
1483 range = kmalloc(sizeof(*range), GFP_KERNEL);
1485 range->base_id = base;
1492 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1494 struct lpi_range *range, *tmp;
1497 mutex_lock(&lpi_range_lock);
1499 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1500 if (range->span >= nr_lpis) {
1501 *base = range->base_id;
1502 range->base_id += nr_lpis;
1503 range->span -= nr_lpis;
1505 if (range->span == 0) {
1506 list_del(&range->entry);
1515 mutex_unlock(&lpi_range_lock);
1517 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1521 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1523 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1525 if (a->base_id + a->span != b->base_id)
1527 b->base_id = a->base_id;
1529 list_del(&a->entry);
1533 static int free_lpi_range(u32 base, u32 nr_lpis)
1535 struct lpi_range *new, *old;
1537 new = mk_lpi_range(base, nr_lpis);
1541 mutex_lock(&lpi_range_lock);
1543 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1544 if (old->base_id < base)
1548 * old is the last element with ->base_id smaller than base,
1549 * so new goes right after it. If there are no elements with
1550 * ->base_id smaller than base, &old->entry ends up pointing
1551 * at the head of the list, and inserting new at the start of
1552 * the list is the right thing to do in that case as well.
1554 list_add(&new->entry, &old->entry);
1556 * Now check if we can merge with the preceding and/or following ranges.
1559 merge_lpi_ranges(old, new);
1560 merge_lpi_ranges(new, list_next_entry(new, entry));
1562 mutex_unlock(&lpi_range_lock);
1566 static int __init its_lpi_init(u32 id_bits)
1568 u32 lpis = (1UL << id_bits) - 8192;
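/*
 * e.g. id_bits == 16 yields 65536 - 8192 = 57344 allocatable LPIs,
 * covering INTIDs 8192..65535 (IDs below 8192 are SGIs/PPIs/SPIs).
 */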
1572 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1574 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1576 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1581 * Initializing the allocator is just the same as freeing the
1582 * full range of LPIs.
1584 err = free_lpi_range(8192, lpis);
1585 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1589 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1591 unsigned long *bitmap = NULL;
1595 err = alloc_lpi_range(nr_irqs, base);
1600 } while (nr_irqs > 0);
1608 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1616 *base = *nr_ids = 0;
1621 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1623 WARN_ON(free_lpi_range(base, nr_ids));
1627 static void gic_reset_prop_table(void *va)
1629 /* Priority 0xa0, Group-1, disabled */
1630 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1632 /* Make sure the GIC will observe the written configuration */
1633 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1636 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1638 struct page *prop_page;
1640 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1644 gic_reset_prop_table(page_address(prop_page));
1649 static void its_free_prop_table(struct page *prop_page)
1651 free_pages((unsigned long)page_address(prop_page),
1652 get_order(LPI_PROPBASE_SZ));
1655 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1657 phys_addr_t start, end, addr_end;
1661 * We don't bother checking for a kdump kernel as by
1662 * construction, the LPI tables are out of this kernel's memory map.
1665 if (is_kdump_kernel())
1668 addr_end = addr + size - 1;
1670 for_each_reserved_mem_region(i, &start, &end) {
1671 if (addr >= start && addr_end <= end)
1675 /* Not found, not a good sign... */
1676 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1678 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1682 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1684 if (efi_enabled(EFI_CONFIG_TABLES))
1685 return efi_mem_reserve_persistent(addr, size);
1690 static int __init its_setup_lpi_prop_table(void)
1692 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1695 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1696 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1698 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1699 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1702 gic_reset_prop_table(gic_rdists->prop_table_va);
1706 lpi_id_bits = min_t(u32,
1707 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1708 ITS_MAX_LPI_NRBITS);
1709 page = its_allocate_prop_table(GFP_NOWAIT);
1711 pr_err("Failed to allocate PROPBASE\n");
1715 gic_rdists->prop_table_pa = page_to_phys(page);
1716 gic_rdists->prop_table_va = page_address(page);
1717 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
1721 pr_info("GICv3: using LPI property table @%pa\n",
1722 &gic_rdists->prop_table_pa);
1724 return its_lpi_init(lpi_id_bits);
1727 static const char *its_base_type_string[] = {
1728 [GITS_BASER_TYPE_DEVICE] = "Devices",
1729 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
1730 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
1731 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1732 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1733 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1734 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1737 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1739 u32 idx = baser - its->tables;
1741 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1744 static void its_write_baser(struct its_node *its, struct its_baser *baser,
1747 u32 idx = baser - its->tables;
1749 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1750 baser->val = its_read_baser(its, baser);
1753 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1754 u64 cache, u64 shr, u32 psz, u32 order,
1757 u64 val = its_read_baser(its, baser);
1758 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1759 u64 type = GITS_BASER_TYPE(val);
1760 u64 baser_phys, tmp;
1766 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1767 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1768 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1769 &its->phys_base, its_base_type_string[type],
1770 alloc_pages, GITS_BASER_PAGES_MAX);
1771 alloc_pages = GITS_BASER_PAGES_MAX;
1772 order = get_order(GITS_BASER_PAGES_MAX * psz);
1775 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1779 base = (void *)page_address(page);
1780 baser_phys = virt_to_phys(base);
1782 /* Check if the physical address of the memory is above 48 bits */
1783 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1785 /* 52bit PA is supported only when PageSize=64K */
1786 if (psz != SZ_64K) {
1787 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1788 free_pages((unsigned long)base, order);
1792 /* Convert 52bit PA to 48bit field */
1793 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1798 (type << GITS_BASER_TYPE_SHIFT) |
1799 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1800 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1805 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1809 val |= GITS_BASER_PAGE_SIZE_4K;
1812 val |= GITS_BASER_PAGE_SIZE_16K;
1815 val |= GITS_BASER_PAGE_SIZE_64K;
1819 its_write_baser(its, baser, val);
1822 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1824 * Shareability didn't stick. Just use
1825 * whatever the read reported, which is likely
1826 * to be the only thing this ITS
1827 * supports. If that's zero, make it
1828 * non-cacheable as well.
1830 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1832 cache = GITS_BASER_nC;
1833 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1838 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1840 * Page size didn't stick. Let's try a smaller
1841 * size and retry. If we reach 4K, then
1842 * something is horribly wrong...
1844 free_pages((unsigned long)base, order);
1850 goto retry_alloc_baser;
1853 goto retry_alloc_baser;
1858 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
1859 &its->phys_base, its_base_type_string[type],
1861 free_pages((unsigned long)base, order);
1865 baser->order = order;
1868 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
1870 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
1871 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
1872 its_base_type_string[type],
1873 (unsigned long)virt_to_phys(base),
1874 indirect ? "indirect" : "flat", (int)esz,
1875 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1880 static bool its_parse_indirect_baser(struct its_node *its,
1881 struct its_baser *baser,
1882 u32 psz, u32 *order, u32 ids)
1884 u64 tmp = its_read_baser(its, baser);
1885 u64 type = GITS_BASER_TYPE(tmp);
1886 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1887 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1888 u32 new_order = *order;
1889 bool indirect = false;
1891 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1892 if ((esz << ids) > (psz * 2)) {
1894 * Find out whether hw supports a single or two-level table by
1895 * reading bit at offset '62' after writing '1' to it.
1897 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1898 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1902 * The size of the lvl2 table is equal to the ITS page size,
1903 * which is 'psz'. For computing the lvl1 table size, subtract
1904 * the ID bits covered by a single lvl2 table from 'ids' and
1905 * multiply by the lvl1 table entry size.
1908 ids -= ilog2(psz / (int)esz);
1909 esz = GITS_LVL1_ENTRY_SIZE;
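/*
 * Rough sizing sketch (illustrative numbers): with psz = 64KB and an
 * 8-byte entry, one lvl2 page resolves 8192 IDs (13 bits); for
 * ids = 20, the lvl1 table then needs 2^(20 - 13) = 128 entries of
 * GITS_LVL1_ENTRY_SIZE each, i.e. 1KB instead of an 8MB flat table.
 */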
1914 * Allocate as many entries as required to fit the
1915 * range of device IDs that the ITS can grok... The ID
1916 * space being incredibly sparse, this results in a
1917 * massive waste of memory if the two-level device table
1918 * feature is not supported by the hardware.
1920 new_order = max_t(u32, get_order(esz << ids), new_order);
1921 if (new_order >= MAX_ORDER) {
1922 new_order = MAX_ORDER - 1;
1923 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1924 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1925 &its->phys_base, its_base_type_string[type],
1926 its->device_ids, ids);
1934 static void its_free_tables(struct its_node *its)
1938 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1939 if (its->tables[i].base) {
1940 free_pages((unsigned long)its->tables[i].base,
1941 its->tables[i].order);
1942 its->tables[i].base = NULL;
1947 static int its_alloc_tables(struct its_node *its)
1949 u64 shr = GITS_BASER_InnerShareable;
1950 u64 cache = GITS_BASER_RaWaWb;
1954 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1955 /* erratum 24313: ignore memory access type */
1956 cache = GITS_BASER_nCnB;
1958 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1959 struct its_baser *baser = its->tables + i;
1960 u64 val = its_read_baser(its, baser);
1961 u64 type = GITS_BASER_TYPE(val);
1962 u32 order = get_order(psz);
1963 bool indirect = false;
1966 case GITS_BASER_TYPE_NONE:
1969 case GITS_BASER_TYPE_DEVICE:
1970 indirect = its_parse_indirect_baser(its, baser,
1975 case GITS_BASER_TYPE_VCPU:
1976 indirect = its_parse_indirect_baser(its, baser,
1978 ITS_MAX_VPEID_BITS);
1982 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1984 its_free_tables(its);
1988 /* Update settings which will be used for next BASERn */
1990 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1991 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
1997 static int its_alloc_collections(struct its_node *its)
2001 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2003 if (!its->collections)
2006 for (i = 0; i < nr_cpu_ids; i++)
2007 its->collections[i].target_address = ~0ULL;
2012 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2014 struct page *pend_page;
2016 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2017 get_order(LPI_PENDBASE_SZ));
2021 /* Make sure the GIC will observe the zeroed page */
2022 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2027 static void its_free_pending_table(struct page *pt)
2029 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2033 * Booting with kdump and LPIs enabled is generally fine. Any other
2034 * case is wrong in the absence of firmware/EFI support.
2036 static bool enabled_lpis_allowed(void)
2041 /* Check whether the property table is in a reserved region */
2042 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2043 addr = val & GENMASK_ULL(51, 12);
2045 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2048 static int __init allocate_lpi_tables(void)
2054 * If LPIs are enabled while we run this from the boot CPU,
2055 * flag the RD tables as pre-allocated if the stars do align.
2057 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2058 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2059 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2060 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2061 pr_info("GICv3: Using preallocated redistributor tables\n");
2064 err = its_setup_lpi_prop_table();
2069 * We allocate all the pending tables anyway, as we may have a
2070 * mix of RDs that have had LPIs enabled, and some that
2071 * don't. We'll free the unused ones as each CPU comes online.
2073 for_each_possible_cpu(cpu) {
2074 struct page *pend_page;
2076 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2078 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2082 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2088 static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2090 u32 count = 1000000; /* 1s! */
2094 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2095 val &= ~GICR_VPENDBASER_Valid;
2096 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2099 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2100 clean = !(val & GICR_VPENDBASER_Dirty);
2106 } while (!clean && count);
2111 static void its_cpu_init_lpis(void)
2113 void __iomem *rbase = gic_data_rdist_rd_base();
2114 struct page *pend_page;
2118 if (gic_data_rdist()->lpi_enabled)
2121 val = readl_relaxed(rbase + GICR_CTLR);
2122 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2123 (val & GICR_CTLR_ENABLE_LPIS)) {
2125 * Check that we get the same property table on all
2126 * RDs. If we don't, this is hopeless.
2128 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2129 paddr &= GENMASK_ULL(51, 12);
2130 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2131 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2133 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2134 paddr &= GENMASK_ULL(51, 16);
2136 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2137 its_free_pending_table(gic_data_rdist()->pend_page);
2138 gic_data_rdist()->pend_page = NULL;
2143 pend_page = gic_data_rdist()->pend_page;
2144 paddr = page_to_phys(pend_page);
2145 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2148 val = (gic_rdists->prop_table_pa |
2149 GICR_PROPBASER_InnerShareable |
2150 GICR_PROPBASER_RaWaWb |
2151 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2153 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2154 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2156 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2157 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2159 * The HW reports non-shareable, we must
2160 * remove the cacheability attributes as well.
2163 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2164 GICR_PROPBASER_CACHEABILITY_MASK);
2165 val |= GICR_PROPBASER_nC;
2166 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2168 pr_info_once("GIC: using cache flushing for LPI property table\n");
2169 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2173 val = (page_to_phys(pend_page) |
2174 GICR_PENDBASER_InnerShareable |
2175 GICR_PENDBASER_RaWaWb);
2177 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2178 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2180 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2182 * The HW reports non-shareable, we must remove the
2183 * cacheability attributes as well.
2185 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2186 GICR_PENDBASER_CACHEABILITY_MASK);
2187 val |= GICR_PENDBASER_nC;
2188 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2192 val = readl_relaxed(rbase + GICR_CTLR);
2193 val |= GICR_CTLR_ENABLE_LPIS;
2194 writel_relaxed(val, rbase + GICR_CTLR);
2196 if (gic_rdists->has_vlpis) {
2197 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2200 * It's possible for a CPU to receive VLPIs before it is
2201 * scheduled as a vPE, especially for the first CPU, and a
2202 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
2203 * as out of range and dropped by the GIC.
2204 * So we initialize IDbits to a known value to avoid VLPI drops.
2206 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2207 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2208 smp_processor_id(), val);
2209 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2212 * Also clear Valid bit of GICR_VPENDBASER, in case some
2213 * ancient programming gets left in and has possibility of
2214 * corrupting memory.
2216 val = its_clear_vpend_valid(vlpi_base);
2217 WARN_ON(val & GICR_VPENDBASER_Dirty);
2220 /* Make sure the GIC has seen the above */
2223 gic_data_rdist()->lpi_enabled = true;
2224 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
2226 gic_data_rdist()->pend_page ? "allocated" : "reserved",
2230 static void its_cpu_init_collection(struct its_node *its)
2232 int cpu = smp_processor_id();
2235 /* avoid cross-node collections and their mapping */
2236 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2237 struct device_node *cpu_node;
2239 cpu_node = of_get_cpu_node(cpu, NULL);
2240 if (its->numa_node != NUMA_NO_NODE &&
2241 its->numa_node != of_node_to_nid(cpu_node))
2246 * We now have to bind each collection to its target redistributor.
2249 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
2251 * This ITS wants the physical address of the redistributor.
2254 target = gic_data_rdist()->phys_base;
2256 /* This ITS wants a linear CPU number. */
2257 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2258 target = GICR_TYPER_CPU_NUMBER(target) << 16;
2261 /* Perform collection mapping */
2262 its->collections[cpu].target_address = target;
2263 its->collections[cpu].col_id = cpu;
2265 its_send_mapc(its, &its->collections[cpu], 1);
2266 its_send_invall(its, &its->collections[cpu]);
2269 static void its_cpu_init_collections(void)
2271 struct its_node *its;
2273 raw_spin_lock(&its_lock);
2275 list_for_each_entry(its, &its_nodes, entry)
2276 its_cpu_init_collection(its);
2278 raw_spin_unlock(&its_lock);
2281 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2283 struct its_device *its_dev = NULL, *tmp;
2284 unsigned long flags;
2286 raw_spin_lock_irqsave(&its->lock, flags);
2288 list_for_each_entry(tmp, &its->its_device_list, entry) {
2289 if (tmp->device_id == dev_id) {
2295 raw_spin_unlock_irqrestore(&its->lock, flags);
2300 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2304 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2305 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2306 return &its->tables[i];
2312 static bool its_alloc_table_entry(struct its_node *its,
2313 struct its_baser *baser, u32 id)
2319 /* Don't allow device id that exceeds single, flat table limit */
2320 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2321 if (!(baser->val & GITS_BASER_INDIRECT))
2322 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2324 /* Compute 1st level table index & check if that exceeds table limit */
2325 idx = id >> ilog2(baser->psz / esz);
2326 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2329 table = baser->base;
2331 /* Allocate memory for 2nd level table */
2333 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2334 get_order(baser->psz));
2338 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2339 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2340 gic_flush_dcache_to_poc(page_address(page), baser->psz);
2342 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2344 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2345 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2346 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2348 /* Ensure updated table contents are visible to ITS hardware */
2355 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2357 struct its_baser *baser;
2359 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2361 /* Don't allow device id that exceeds ITS hardware limit */
2363 return (ilog2(dev_id) < its->device_ids);
2365 return its_alloc_table_entry(its, baser, dev_id);
2368 static bool its_alloc_vpe_table(u32 vpe_id)
2370 struct its_node *its;
2373 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2374 * could try and only do it on ITSs corresponding to devices
2375 * that have interrupts targeted at this VPE, but the
2376 * complexity becomes crazy (and you have tons of memory anyway).
2379 list_for_each_entry(its, &its_nodes, entry) {
2380 struct its_baser *baser;
2385 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2389 if (!its_alloc_table_entry(its, baser, vpe_id))
2396 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2397 int nvecs, bool alloc_lpis)
2399 struct its_device *dev;
2400 unsigned long *lpi_map = NULL;
2401 unsigned long flags;
2402 u16 *col_map = NULL;
2409 if (!its_alloc_device_table(its, dev_id))
2412 if (WARN_ON(!is_power_of_2(nvecs)))
2413 nvecs = roundup_pow_of_two(nvecs);
2415 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2417 * Even if the device wants a single LPI, the ITT must be
2418 * sized as a power of two (and you need at least one bit...).
2420 nr_ites = max(2, nvecs);
2421 sz = nr_ites * its->ite_size;
2422 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
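/*
 * Sizing sketch, assuming an 8-byte ITE: a device asking for a single
 * vector still gets nr_ites = 2, so sz = 16 is bumped to ITS_ITT_ALIGN
 * (256) plus 255 bytes of slack so that a 256-byte-aligned ITT fits
 * inside the allocation.
 */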
2423 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
2425 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2427 col_map = kcalloc(nr_lpis, sizeof(*col_map),
2430 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
2435 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2443 gic_flush_dcache_to_poc(itt, sz);
2447 dev->nr_ites = nr_ites;
2448 dev->event_map.lpi_map = lpi_map;
2449 dev->event_map.col_map = col_map;
2450 dev->event_map.lpi_base = lpi_base;
2451 dev->event_map.nr_lpis = nr_lpis;
2452 mutex_init(&dev->event_map.vlpi_lock);
2453 dev->device_id = dev_id;
2454 INIT_LIST_HEAD(&dev->entry);
2456 raw_spin_lock_irqsave(&its->lock, flags);
2457 list_add(&dev->entry, &its->its_device_list);
2458 raw_spin_unlock_irqrestore(&its->lock, flags);
2460 /* Map device to its ITT */
2461 its_send_mapd(dev, 1);
2466 static void its_free_device(struct its_device *its_dev)
2468 unsigned long flags;
2470 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2471 list_del(&its_dev->entry);
2472 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2473 kfree(its_dev->itt);
2477 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2481 /* Find a free LPI region in lpi_map and allocate them. */
2482 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2483 dev->event_map.nr_lpis,
2484 get_count_order(nvecs));
2488 *hwirq = dev->event_map.lpi_base + idx;
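/*
 * The hwirq handed back is an absolute LPI number; the event ID the
 * ITS sees for this device is the offset from the device's base,
 * i.e. event = hwirq - event_map.lpi_base.
 */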
2493 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2494 int nvec, msi_alloc_info_t *info)
2496 struct its_node *its;
2497 struct its_device *its_dev;
2498 struct msi_domain_info *msi_info;
2503 * We ignore "dev" entirely, and rely on the dev_id that has
2504 * been passed via the scratchpad. This limits this domain's
2505 * usefulness to upper layers that definitely know that they
2506 * are built on top of the ITS.
2508 dev_id = info->scratchpad[0].ul;
2510 msi_info = msi_get_domain_info(domain);
2511 its = msi_info->data;
2513 if (!gic_rdists->has_direct_lpi &&
2515 vpe_proxy.dev->its == its &&
2516 dev_id == vpe_proxy.dev->device_id) {
2517 /* Bad luck. Get yourself a better implementation */
2518 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2523 mutex_lock(&its->dev_alloc_lock);
2524 its_dev = its_find_device(its, dev_id);
2527 * We already have seen this ID, probably through
2528 * another alias (PCI bridge of some sort). No need to
2529 * create the device.
2531 its_dev->shared = true;
2532 pr_debug("Reusing ITT for devID %x\n", dev_id);
2536 its_dev = its_create_device(its, dev_id, nvec, true);
2542 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2544 mutex_unlock(&its->dev_alloc_lock);
2545 info->scratchpad[0].ptr = its_dev;
2549 static struct msi_domain_ops its_msi_domain_ops = {
2550 .msi_prepare = its_msi_prepare,
2553 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2555 irq_hw_number_t hwirq)
2557 struct irq_fwspec fwspec;
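/*
 * The parent GIC domain expects either a three-cell DT spec
 * (GIC_IRQ_TYPE_LPI, hwirq, trigger) or a two-cell fwspec of hwirq
 * and trigger; either way LPIs are always edge-rising.
 */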
2559 if (irq_domain_get_of_node(domain->parent)) {
2560 fwspec.fwnode = domain->parent->fwnode;
2561 fwspec.param_count = 3;
2562 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2563 fwspec.param[1] = hwirq;
2564 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2565 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2566 fwspec.fwnode = domain->parent->fwnode;
2567 fwspec.param_count = 2;
2568 fwspec.param[0] = hwirq;
2569 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2574 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2577 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2578 unsigned int nr_irqs, void *args)
2580 msi_alloc_info_t *info = args;
2581 struct its_device *its_dev = info->scratchpad[0].ptr;
2582 struct its_node *its = its_dev->its;
2583 struct irq_data *irqd;
2584 irq_hw_number_t hwirq;
2588 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2592 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
2596 for (i = 0; i < nr_irqs; i++) {
2597 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2601 irq_domain_set_hwirq_and_chip(domain, virq + i,
2602 hwirq + i, &its_irq_chip, its_dev);
2603 irqd = irq_get_irq_data(virq + i);
2604 irqd_set_single_target(irqd);
2605 irqd_set_affinity_on_activate(irqd);
2606 pr_debug("ID:%d pID:%d vID:%d\n",
2607 (int)(hwirq + i - its_dev->event_map.lpi_base),
2608 (int)(hwirq + i), virq + i);
2614 static int its_irq_domain_activate(struct irq_domain *domain,
2615 struct irq_data *d, bool reserve)
2617 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2618 u32 event = its_get_event_id(d);
2619 const struct cpumask *cpu_mask = cpu_online_mask;
2622 /* get the cpu_mask of local node */
2623 if (its_dev->its->numa_node >= 0)
2624 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2626 /* Bind the LPI to the first possible CPU */
2627 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2628 if (cpu >= nr_cpu_ids) {
2629 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
return -EINVAL;

2632 cpu = cpumask_first(cpu_online_mask);
2635 its_dev->event_map.col_map[event] = cpu;
2636 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2638 /* Map the GIC IRQ and event to the device */
2639 its_send_mapti(its_dev, d->hwirq, event);
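/*
 * From this point on, the ITS translates (DeviceID, EventID) for this
 * device into the LPI above, targeting the collection picked earlier.
 */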
2643 static void its_irq_domain_deactivate(struct irq_domain *domain,
2646 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2647 u32 event = its_get_event_id(d);
2649 /* Stop the delivery of interrupts */
2650 its_send_discard(its_dev, event);
2653 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2654 unsigned int nr_irqs)
2656 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2657 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2658 struct its_node *its = its_dev->its;
2661 bitmap_release_region(its_dev->event_map.lpi_map,
2662 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2663 get_count_order(nr_irqs));
2665 for (i = 0; i < nr_irqs; i++) {
2666 struct irq_data *data = irq_domain_get_irq_data(domain,
2668 /* Nuke the entry in the domain */
2669 irq_domain_reset_irq_data(data);
2672 mutex_lock(&its->dev_alloc_lock);
2675 * If all interrupts have been freed, start mopping the
2676 * floor. This is conditioned on the device not being shared.
2678 if (!its_dev->shared &&
2679 bitmap_empty(its_dev->event_map.lpi_map,
2680 its_dev->event_map.nr_lpis)) {
2681 its_lpi_free(its_dev->event_map.lpi_map,
2682 its_dev->event_map.lpi_base,
2683 its_dev->event_map.nr_lpis);
2684 kfree(its_dev->event_map.col_map);
2686 /* Unmap device/itt */
2687 its_send_mapd(its_dev, 0);
2688 its_free_device(its_dev);
2691 mutex_unlock(&its->dev_alloc_lock);
2693 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
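/*
 * Note the teardown order: events go back to the per-device bitmap
 * first, and the ITT/MAPD mapping plus the device itself are only
 * dropped once the bitmap is empty and the device is not shared by
 * another alias.
 */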
2696 static const struct irq_domain_ops its_domain_ops = {
2697 .alloc = its_irq_domain_alloc,
2698 .free = its_irq_domain_free,
2699 .activate = its_irq_domain_activate,
2700 .deactivate = its_irq_domain_deactivate,
2706 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2707 * likely), the only way to perform an invalidate is to use a fake
2708 * device to issue an INV command, implying that the LPI has first
2709 * been mapped to some event on that device. Since this is not exactly
2710 * cheap, we try to keep that mapping around as long as possible, and
2711 * only issue an UNMAP if we're short on available slots.
2713 * Broken by design(tm).
2715 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2717 /* Already unmapped? */
2718 if (vpe->vpe_proxy_event == -1)
2721 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2722 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2725 * We don't track empty slots at all, so let's move the
2726 * next_victim pointer if we can quickly reuse that slot
2727 * instead of nuking an existing entry. Not clear that this is
2728 * always a win though, and this might just generate a ripple
2729 * effect... Let's just hope VPEs don't migrate too often.
2731 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2732 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2734 vpe->vpe_proxy_event = -1;
2737 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2739 if (!gic_rdists->has_direct_lpi) {
2740 unsigned long flags;
2742 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2743 its_vpe_db_proxy_unmap_locked(vpe);
2744 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2748 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2750 /* Already mapped? */
2751 if (vpe->vpe_proxy_event != -1)
2754 /* This slot was already allocated. Kick the other VPE out. */
2755 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2756 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2758 /* Map the new VPE instead */
2759 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2760 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2761 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2763 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2764 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
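/*
 * Proxy slots are handed out round-robin: mapping a vPE's doorbell
 * consumes one event on the proxy device and routes it to the vPE's
 * current collection, evicting the previous occupant if needed.
 */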
2767 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2769 unsigned long flags;
2770 struct its_collection *target_col;
2772 if (gic_rdists->has_direct_lpi) {
2773 void __iomem *rdbase;
2775 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2776 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2777 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
return;
}
2783 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2785 its_vpe_db_proxy_map_locked(vpe);
2787 target_col = &vpe_proxy.dev->its->collections[to];
2788 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2789 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2791 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2794 static int its_vpe_set_affinity(struct irq_data *d,
2795 const struct cpumask *mask_val,
2798 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2799 int cpu = cpumask_first(mask_val);
2802 * Changing affinity is mega expensive, so let's be as lazy as
2803 * we can and only do it if we really have to. Also, if mapped
2804 * into the proxy device, we need to move the doorbell
2805 * interrupt to its new location.
2807 if (vpe->col_idx != cpu) {
2808 int from = vpe->col_idx;

vpe->col_idx = cpu;
2811 its_send_vmovp(vpe);
2812 its_vpe_db_proxy_move(vpe, from, cpu);
2815 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2817 return IRQ_SET_MASK_OK_DONE;
2820 static void its_vpe_schedule(struct its_vpe *vpe)
2822 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2825 /* Schedule the VPE */
2826 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2827 GENMASK_ULL(51, 12);
2828 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2829 val |= GICR_VPROPBASER_RaWb;
2830 val |= GICR_VPROPBASER_InnerShareable;
2831 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2833 val = virt_to_phys(page_address(vpe->vpt_page)) &
2834 GENMASK_ULL(51, 16);
2835 val |= GICR_VPENDBASER_RaWaWb;
2836 val |= GICR_VPENDBASER_NonShareable;
2838 * There is no good way of finding out if the pending table is
2839 * empty as we can race against the doorbell interrupt very
2840 * easily. So in the end, vpe->pending_last is only an
2841 * indication that the vcpu has something pending, not one
2842 * that the pending table is empty. A good implementation
2843 * would be able to read its coarse map pretty quickly anyway,
2844 * making this a tolerable issue.
2846 val |= GICR_VPENDBASER_PendingLast;
2847 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2848 val |= GICR_VPENDBASER_Valid;
2849 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
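/*
 * VPROPBASER points at the VM-wide vLPI configuration table (one
 * byte per vLPI), VPENDBASER at this vPE's private pending table;
 * setting Valid is what makes the redistributor start scanning them.
 */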
2852 static void its_vpe_deschedule(struct its_vpe *vpe)
2854 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2857 val = its_clear_vpend_valid(vlpi_base);
2859 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2860 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
vpe->idai = false;
2862 vpe->pending_last = true;
} else {
2864 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2865 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}
2869 static void its_vpe_invall(struct its_vpe *vpe)
2871 struct its_node *its;
2873 list_for_each_entry(its, &its_nodes, entry) {
2877 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2881 * Sending a VINVALL to a single ITS is enough, as all
2882 * we need is to reach the redistributors.
2884 its_send_vinvall(its, vpe);
2889 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2891 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2892 struct its_cmd_info *info = vcpu_info;
2894 switch (info->cmd_type) {
case SCHEDULE_VPE:
2896 its_vpe_schedule(vpe);
return 0;

2899 case DESCHEDULE_VPE:
2900 its_vpe_deschedule(vpe);
return 0;

case INVALL_VPE:
2904 its_vpe_invall(vpe);
return 0;

default:
return -EINVAL;
}
2912 static void its_vpe_send_cmd(struct its_vpe *vpe,
2913 void (*cmd)(struct its_device *, u32))
2915 unsigned long flags;
2917 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2919 its_vpe_db_proxy_map_locked(vpe);
2920 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2922 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2925 static void its_vpe_send_inv(struct irq_data *d)
2927 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2929 if (gic_rdists->has_direct_lpi) {
2930 void __iomem *rdbase;
2932 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2933 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2934 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
} else {
2937 its_vpe_send_cmd(vpe, its_send_inv);
}
2941 static void its_vpe_mask_irq(struct irq_data *d)
2944 * We need to mask the LPI, which is described by the parent
2945 * irq_data. Instead of calling into the parent (which won't
2946 * exactly do the right thing), let's simply use the
2947 * parent_data pointer. Yes, I'm naughty.
2949 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2950 its_vpe_send_inv(d);
2953 static void its_vpe_unmask_irq(struct irq_data *d)
2955 /* Same hack as above... */
2956 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2957 its_vpe_send_inv(d);
2960 static int its_vpe_set_irqchip_state(struct irq_data *d,
2961 enum irqchip_irq_state which,
bool state)
2964 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2966 if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
2969 if (gic_rdists->has_direct_lpi) {
2970 void __iomem *rdbase;
2972 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
if (state) {
2974 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
} else {
2976 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2977 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
}
} else {
if (state)
2982 its_vpe_send_cmd(vpe, its_send_int);
else
2984 its_vpe_send_cmd(vpe, its_send_clear);
}

return 0;
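/*
 * Retriggering a vPE doorbell is just re-pending it: either a direct
 * write to GICR_SETLPIR, or an INT command through the proxy device.
 */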
2990 static int its_vpe_retrigger(struct irq_data *d)
2992 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
2995 static struct irq_chip its_vpe_irq_chip = {
2996 .name = "GICv4-vpe",
2997 .irq_mask = its_vpe_mask_irq,
2998 .irq_unmask = its_vpe_unmask_irq,
2999 .irq_eoi = irq_chip_eoi_parent,
3000 .irq_set_affinity = its_vpe_set_affinity,
3001 .irq_retrigger = its_vpe_retrigger,
3002 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
3003 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
3006 static int its_vpe_id_alloc(void)
3008 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
3011 static void its_vpe_id_free(u16 id)
3013 ida_simple_remove(&its_vpeid_ida, id);
3016 static int its_vpe_init(struct its_vpe *vpe)
3018 struct page *vpt_page;
3021 /* Allocate vpe_id */
3022 vpe_id = its_vpe_id_alloc();
3027 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3029 its_vpe_id_free(vpe_id);
3033 if (!its_alloc_vpe_table(vpe_id)) {
3034 its_vpe_id_free(vpe_id);
3035 its_free_pending_table(vpt_page);
3039 vpe->vpe_id = vpe_id;
3040 vpe->vpt_page = vpt_page;
3041 vpe->vpe_proxy_event = -1;
3046 static void its_vpe_teardown(struct its_vpe *vpe)
3048 its_vpe_db_proxy_unmap(vpe);
3049 its_vpe_id_free(vpe->vpe_id);
3050 its_free_pending_table(vpe->vpt_page);
3053 static void its_vpe_irq_domain_free(struct irq_domain *domain,
3055 unsigned int nr_irqs)
3057 struct its_vm *vm = domain->host_data;
3060 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3062 for (i = 0; i < nr_irqs; i++) {
3063 struct irq_data *data = irq_domain_get_irq_data(domain,
3065 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3067 BUG_ON(vm != vpe->its_vm);
3069 clear_bit(data->hwirq, vm->db_bitmap);
3070 its_vpe_teardown(vpe);
3071 irq_domain_reset_irq_data(data);
3074 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
3075 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
3076 its_free_prop_table(vm->vprop_page);
3080 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3081 unsigned int nr_irqs, void *args)
3083 struct its_vm *vm = args;
3084 unsigned long *bitmap;
3085 struct page *vprop_page;
3086 int base, nr_ids, i, err = 0;
3090 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
3094 if (nr_ids < nr_irqs) {
3095 its_lpi_free(bitmap, base, nr_ids);
3099 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3101 its_lpi_free(bitmap, base, nr_ids);
3105 vm->db_bitmap = bitmap;
3106 vm->db_lpi_base = base;
3107 vm->nr_db_lpis = nr_ids;
3108 vm->vprop_page = vprop_page;
3110 for (i = 0; i < nr_irqs; i++) {
3111 vm->vpes[i]->vpe_db_lpi = base + i;
3112 err = its_vpe_init(vm->vpes[i]);
3115 err = its_irq_gic_domain_alloc(domain, virq + i,
3116 vm->vpes[i]->vpe_db_lpi);
3119 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3120 &its_vpe_irq_chip, vm->vpes[i]);
3126 its_vpe_irq_domain_free(domain, virq, i);
3128 its_lpi_free(bitmap, base, nr_ids);
3129 its_free_prop_table(vprop_page);
3135 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3136 struct irq_data *d, bool reserve)
3138 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3139 struct its_node *its;
3141 /* If we use the list map, we issue VMAPP on demand... */
if (its_list_map)
return 0;
3145 /* Map the VPE to the first possible CPU */
3146 vpe->col_idx = cpumask_first(cpu_online_mask);
3148 list_for_each_entry(its, &its_nodes, entry) {
3152 its_send_vmapp(its, vpe, true);
3153 its_send_vinvall(its, vpe);
3156 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3161 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3164 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3165 struct its_node *its;
3168 * If we use the list map, we unmap the VPE once no VLPIs are
3169 * associated with the VM.
3174 list_for_each_entry(its, &its_nodes, entry) {
3178 its_send_vmapp(its, vpe, false);
3182 static const struct irq_domain_ops its_vpe_domain_ops = {
3183 .alloc = its_vpe_irq_domain_alloc,
3184 .free = its_vpe_irq_domain_free,
3185 .activate = its_vpe_irq_domain_activate,
3186 .deactivate = its_vpe_irq_domain_deactivate,
3189 static int its_force_quiescent(void __iomem *base)
3191 u32 count = 1000000; /* 1s */
3194 val = readl_relaxed(base + GITS_CTLR);
3196 * GIC architecture specification requires the ITS to be both
3197 * disabled and quiescent for writes to GITS_BASER<n> or
3198 * GITS_CBASER to not have UNPREDICTABLE results.
3200 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
return 0;
3203 /* Disable the generation of all interrupts to this ITS */
3204 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
3205 writel_relaxed(val, base + GITS_CTLR);
3207 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
while (1) {
3209 val = readl_relaxed(base + GITS_CTLR);
3210 if (val & GITS_CTLR_QUIESCENT)
return 0;

count--;
if (!count)
return -EBUSY;

cpu_relax();
udelay(1);
}
3222 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
3224 struct its_node *its = data;
3226 /* erratum 22375: only alloc 8MB table size */
3227 its->device_ids = 0x14; /* 20 bits, 8MB */
3228 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
3233 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
3235 struct its_node *its = data;
3237 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
3242 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
3244 struct its_node *its = data;
3246 /* On QDF2400, the size of the ITE is 16 bytes */
its->ite_size = 16;
3252 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3254 struct its_node *its = its_dev->its;
3257 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3258 * which maps 32-bit writes targeted at a separate window of
3259 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3260 * with device ID taken from bits [device_id_bits + 1:2] of
3261 * the window offset.
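*
* For example, DeviceID 5 has its MSIs directed at pre_its_base + 0x14;
* the pre-ITS recovers the DeviceID from the window offset (0x14 >> 2)
* and forwards the write to GITS_TRANSLATER.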
3263 return its->pre_its_base + (its_dev->device_id << 2);
3266 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3268 struct its_node *its = data;
3269 u32 pre_its_window[2];
3272 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3273 "socionext,synquacer-pre-its",
3275 ARRAY_SIZE(pre_its_window))) {
3277 its->pre_its_base = pre_its_window[0];
3278 its->get_msi_base = its_irq_get_msi_base_pre_its;
3280 ids = ilog2(pre_its_window[1]) - 2;
3281 if (its->device_ids > ids)
3282 its->device_ids = ids;
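/*
 * Each DeviceID owns 4 bytes of the pre-ITS window, so a window of
 * size 2^n supports n - 2 DeviceID bits; a 4MB window, for instance,
 * caps device_ids at 20 bits.
 */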
3284 /* the pre-ITS breaks isolation, so disable MSI remapping */
3285 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3291 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3293 struct its_node *its = data;
3296 * Hip07 insists on using the wrong address for the VLPI
3297 * page. Trick it into doing the right thing...
3299 its->vlpi_redist_offset = SZ_128K;
3303 static const struct gic_quirk its_quirks[] = {
3304 #ifdef CONFIG_CAVIUM_ERRATUM_22375
3306 .desc = "ITS: Cavium errata 22375, 24313",
3307 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3309 .init = its_enable_quirk_cavium_22375,
3312 #ifdef CONFIG_CAVIUM_ERRATUM_23144
3314 .desc = "ITS: Cavium erratum 23144",
3315 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3317 .init = its_enable_quirk_cavium_23144,
3320 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3322 .desc = "ITS: QDF2400 erratum 0065",
3323 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3325 .init = its_enable_quirk_qdf2400_e0065,
3328 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3331 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3332 * implementation, but with a 'pre-ITS' added that requires
3333 * special handling in software.
3335 .desc = "ITS: Socionext Synquacer pre-ITS",
3338 .init = its_enable_quirk_socionext_synquacer,
3341 #ifdef CONFIG_HISILICON_ERRATUM_161600802
3343 .desc = "ITS: Hip07 erratum 161600802",
3346 .init = its_enable_quirk_hip07_161600802,
3353 static void its_enable_quirks(struct its_node *its)
3355 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3357 gic_enable_quirks(iidr, its_quirks, its);
3360 static int its_save_disable(void)
3362 struct its_node *its;
3365 raw_spin_lock(&its_lock);
3366 list_for_each_entry(its, &its_nodes, entry) {
3370 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3371 err = its_force_quiescent(base);
3373 pr_err("ITS@%pa: failed to quiesce: %d\n",
3374 &its->phys_base, err);
3375 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3379 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3384 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3388 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3391 raw_spin_unlock(&its_lock);
3396 static void its_restore_enable(void)
3398 struct its_node *its;
3401 raw_spin_lock(&its_lock);
3402 list_for_each_entry(its, &its_nodes, entry) {
3409 * Make sure that the ITS is disabled. If it fails to quiesce,
3410 * don't restore it since writing to CBASER or BASER<n>
3411 * registers is undefined according to the GIC v3 ITS specification.
3414 * Firmware resuming with the ITS enabled is terminally broken.
3416 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
3417 ret = its_force_quiescent(base);
3419 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3420 &its->phys_base, ret);
3424 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3427 * Writing CBASER resets CREADR to 0, so make CWRITER and
3428 * cmd_write line up with it.
3430 its->cmd_write = its->cmd_base;
3431 gits_write_cwriter(0, base + GITS_CWRITER);
3433 /* Restore GITS_BASER from the value cache. */
3434 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3435 struct its_baser *baser = &its->tables[i];
3437 if (!(baser->val & GITS_BASER_VALID))
3440 its_write_baser(its, baser, baser->val);
3442 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3445 * Reinit the collection if it's stored in the ITS. This is
3446 * indicated by the col_id being less than the HCC field
3447 * (CID < HCC), as specified in the GIC v3 documentation.
3449 if (its->collections[smp_processor_id()].col_id <
3450 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3451 its_cpu_init_collection(its);
3453 raw_spin_unlock(&its_lock);
3456 static struct syscore_ops its_syscore_ops = {
3457 .suspend = its_save_disable,
3458 .resume = its_restore_enable,
3461 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
3463 struct irq_domain *inner_domain;
3464 struct msi_domain_info *info;
3466 info = kzalloc(sizeof(*info), GFP_KERNEL);
3470 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
3471 if (!inner_domain) {
3476 inner_domain->parent = its_parent;
3477 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
3478 inner_domain->flags |= its->msi_domain_flags;
3479 info->ops = &its_msi_domain_ops;
3481 inner_domain->host_data = info;
3486 static int its_init_vpe_domain(void)
3488 struct its_node *its;
3492 if (gic_rdists->has_direct_lpi) {
3493 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3497 /* Any ITS will do, even if not v4 */
3498 its = list_first_entry(&its_nodes, struct its_node, entry);
3500 entries = roundup_pow_of_two(nr_cpu_ids);
3501 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
3503 if (!vpe_proxy.vpes) {
3504 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3508 /* Use the last possible DevID */
3509 devid = GENMASK(its->device_ids - 1, 0);
3510 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3511 if (!vpe_proxy.dev) {
3512 kfree(vpe_proxy.vpes);
3513 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3517 BUG_ON(entries > vpe_proxy.dev->nr_ites);
3519 raw_spin_lock_init(&vpe_proxy.lock);
3520 vpe_proxy.next_victim = 0;
3521 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3522 devid, vpe_proxy.dev->nr_ites);
3527 static int __init its_compute_its_list_map(struct resource *res,
3528 void __iomem *its_base)
3534 * This is assumed to be done early enough that we're
3535 * guaranteed to be single-threaded, hence no
3536 * locking. Should this change, we should address that.
3539 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3540 if (its_number >= GICv4_ITS_LIST_MAX) {
3541 pr_err("ITS@%pa: No ITSList entry available!\n",
3546 ctlr = readl_relaxed(its_base + GITS_CTLR);
3547 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3548 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3549 writel_relaxed(ctlr, its_base + GITS_CTLR);
3550 ctlr = readl_relaxed(its_base + GITS_CTLR);
3551 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3552 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3553 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3556 if (test_and_set_bit(its_number, &its_list_map)) {
3557 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3558 &res->start, its_number);
3565 static int __init its_probe_one(struct resource *res,
3566 struct fwnode_handle *handle, int numa_node)
3568 struct its_node *its;
3569 void __iomem *its_base;
3571 u64 baser, tmp, typer;
3575 its_base = ioremap(res->start, resource_size(res));
3577 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
3581 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3582 if (val != 0x30 && val != 0x40) {
3583 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
3588 err = its_force_quiescent(its_base);
3590 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
3594 pr_info("ITS %pR\n", res);
3596 its = kzalloc(sizeof(*its), GFP_KERNEL);
3602 raw_spin_lock_init(&its->lock);
3603 mutex_init(&its->dev_alloc_lock);
3604 INIT_LIST_HEAD(&its->entry);
3605 INIT_LIST_HEAD(&its->its_device_list);
3606 typer = gic_read_typer(its_base + GITS_TYPER);
3607 its->base = its_base;
3608 its->phys_base = res->start;
3609 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
3610 its->device_ids = GITS_TYPER_DEVBITS(typer);
3611 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
3613 if (!(typer & GITS_TYPER_VMOVP)) {
3614 err = its_compute_its_list_map(res, its_base);
3620 pr_info("ITS@%pa: Using ITS number %d\n",
3623 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3627 its->numa_node = numa_node;
3629 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3630 get_order(ITS_CMD_QUEUE_SZ));
3635 its->cmd_base = (void *)page_address(page);
3636 its->cmd_write = its->cmd_base;
3637 its->fwnode_handle = handle;
3638 its->get_msi_base = its_irq_get_msi_base;
3639 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
3641 its_enable_quirks(its);
3643 err = its_alloc_tables(its);
3647 err = its_alloc_collections(its);
3649 goto out_free_tables;
3651 baser = (virt_to_phys(its->cmd_base) |
3652 GITS_CBASER_RaWaWb |
3653 GITS_CBASER_InnerShareable |
3654 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
GITS_CBASER_VALID);
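/*
 * CBASER encodes the command queue's physical base, its memory
 * attributes and its size as (number of 4kB pages - 1); a 64kB queue,
 * for instance, encodes as 15 in the size field.
 */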
3657 gits_write_cbaser(baser, its->base + GITS_CBASER);
3658 tmp = gits_read_cbaser(its->base + GITS_CBASER);
3660 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
3661 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3663 * The HW reports non-shareable, we must
3664 * remove the cacheability attributes as
3667 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3668 GITS_CBASER_CACHEABILITY_MASK);
3669 baser |= GITS_CBASER_nC;
3670 gits_write_cbaser(baser, its->base + GITS_CBASER);
3672 pr_info("ITS: using cache flushing for cmd queue\n");
3673 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3676 gits_write_cwriter(0, its->base + GITS_CWRITER);
3677 ctlr = readl_relaxed(its->base + GITS_CTLR);
3678 ctlr |= GITS_CTLR_ENABLE;
3680 ctlr |= GITS_CTLR_ImDe;
3681 writel_relaxed(ctlr, its->base + GITS_CTLR);
3683 err = its_init_domain(handle, its);
3685 goto out_free_tables;
3687 raw_spin_lock(&its_lock);
3688 list_add(&its->entry, &its_nodes);
3689 raw_spin_unlock(&its_lock);
3694 its_free_tables(its);
3696 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
3701 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
3705 static bool gic_rdists_supports_plpis(void)
3707 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
3710 static int redist_disable_lpis(void)
3712 void __iomem *rbase = gic_data_rdist_rd_base();
3713 u64 timeout = USEC_PER_SEC;
3716 if (!gic_rdists_supports_plpis()) {
3717 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3721 val = readl_relaxed(rbase + GICR_CTLR);
3722 if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
3726 * If coming via a CPU hotplug event, we don't need to disable
3727 * LPIs before trying to re-enable them. They are already
3728 * configured and all is well in the world.
3730 * If running with preallocated tables, there is nothing to do.
3732 if (gic_data_rdist()->lpi_enabled ||
3733 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
return 0;
3737 * From that point on, we only try to do some damage control.
3739 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
3740 smp_processor_id());
3741 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3744 val &= ~GICR_CTLR_ENABLE_LPIS;
3745 writel_relaxed(val, rbase + GICR_CTLR);
3747 /* Make sure any change to GICR_CTLR is observable by the GIC */
3751 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3752 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3753 * Error out if we time out waiting for RWP to clear.
3755 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
if (!timeout) {
3757 pr_err("CPU%d: Timeout while disabling LPIs\n",
3758 smp_processor_id());
return -ETIMEDOUT;
}
udelay(1);
timeout--;
}
3766 * After it has been written to 1, it is IMPLEMENTATION
3767 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
3768 * cleared to 0. Error out if clearing the bit failed.
3770 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3771 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3778 int its_cpu_init(void)
3780 if (!list_empty(&its_nodes)) {
3783 ret = redist_disable_lpis();
3787 its_cpu_init_lpis();
3788 its_cpu_init_collections();
3794 static const struct of_device_id its_device_id[] = {
3795 { .compatible = "arm,gic-v3-its", },
3799 static int __init its_of_probe(struct device_node *node)
3801 struct device_node *np;
3802 struct resource res;
3804 for (np = of_find_matching_node(node, its_device_id); np;
3805 np = of_find_matching_node(np, its_device_id)) {
3806 if (!of_device_is_available(np))
3808 if (!of_property_read_bool(np, "msi-controller")) {
3809 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3814 if (of_address_to_resource(np, 0, &res)) {
3815 pr_warn("%pOF: no regs?\n", np);
3819 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3826 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3828 #ifdef CONFIG_ACPI_NUMA
3829 struct its_srat_map {
3836 static struct its_srat_map *its_srat_maps __initdata;
3837 static int its_in_srat __initdata;
3839 static int __init acpi_get_its_numa_node(u32 its_id)
3843 for (i = 0; i < its_in_srat; i++) {
3844 if (its_id == its_srat_maps[i].its_id)
3845 return its_srat_maps[i].numa_node;
3847 return NUMA_NO_NODE;
3850 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
3851 const unsigned long end)
3856 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
3857 const unsigned long end)
3860 struct acpi_srat_gic_its_affinity *its_affinity;
3862 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3866 if (its_affinity->header.length < sizeof(*its_affinity)) {
3867 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3868 its_affinity->header.length);
3872 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3874 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3875 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3879 its_srat_maps[its_in_srat].numa_node = node;
3880 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3882 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3883 its_affinity->proximity_domain, its_affinity->its_id, node);
3888 static void __init acpi_table_parse_srat_its(void)
3892 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3893 sizeof(struct acpi_table_srat),
3894 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3895 gic_acpi_match_srat_its, 0);
3899 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
3901 if (!its_srat_maps) {
3902 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3906 acpi_table_parse_entries(ACPI_SIG_SRAT,
3907 sizeof(struct acpi_table_srat),
3908 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3909 gic_acpi_parse_srat_its, 0);
3912 /* free the its_srat_maps after ITS probing */
3913 static void __init acpi_its_srat_maps_free(void)
3915 kfree(its_srat_maps);
3918 static void __init acpi_table_parse_srat_its(void) { }
3919 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
3920 static void __init acpi_its_srat_maps_free(void) { }
3923 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
3924 const unsigned long end)
3926 struct acpi_madt_generic_translator *its_entry;
3927 struct fwnode_handle *dom_handle;
3928 struct resource res;
3931 its_entry = (struct acpi_madt_generic_translator *)header;
3932 memset(&res, 0, sizeof(res));
3933 res.start = its_entry->base_address;
3934 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
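/*
 * 128kB covers the two 64kB GITS register frames: the control
 * registers and the translation frame holding GITS_TRANSLATER.
 */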
3935 res.flags = IORESOURCE_MEM;
3937 dom_handle = irq_domain_alloc_fwnode(&res.start);
3939 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3944 err = iort_register_domain_token(its_entry->translation_id, res.start,
3947 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
3948 &res.start, its_entry->translation_id);
3952 err = its_probe_one(&res, dom_handle,
3953 acpi_get_its_numa_node(its_entry->translation_id));
3957 iort_deregister_domain_token(its_entry->translation_id);
3959 irq_domain_free_fwnode(dom_handle);
3963 static void __init its_acpi_probe(void)
3965 acpi_table_parse_srat_its();
3966 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
3967 gic_acpi_parse_madt_its, 0);
3968 acpi_its_srat_maps_free();
3971 static void __init its_acpi_probe(void) { }
3974 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
3975 struct irq_domain *parent_domain)
3977 struct device_node *of_node;
3978 struct its_node *its;
3979 bool has_v4 = false;
3982 its_parent = parent_domain;
3983 of_node = to_of_node(handle);
3985 its_of_probe(of_node);
3989 if (list_empty(&its_nodes)) {
3990 pr_warn("ITS: No ITS available, not enabling LPIs\n");
3994 gic_rdists = rdists;
3996 err = allocate_lpi_tables();
4000 list_for_each_entry(its, &its_nodes, entry)
4001 has_v4 |= its->is_v4;
4003 if (has_v4 && rdists->has_vlpis) {
4004 if (its_init_vpe_domain() ||
4005 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
4006 rdists->has_vlpis = false;
4007 pr_err("ITS: Disabling GICv4 support\n");
4011 register_syscore_ops(&its_syscore_ops);