// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>

#include "habanalabs.h"
12 static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
14 struct asic_fixed_properties *prop = &hdev->asic_prop;
16 return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
17 prop->dmmu.start_addr,
22 * hl_mmu_init() - initialize the MMU module.
23 * @hdev: habanalabs device structure.
25 * This function does the following:
26 * - Create a pool of pages for pgt_infos.
27 * - Create a shadow table for pgt
29 * Return: 0 for success, non-zero for failure.
31 int hl_mmu_init(struct hl_device *hdev)
34 return hdev->mmu_func.init(hdev);
40 * hl_mmu_fini() - release the MMU module.
41 * @hdev: habanalabs device structure.
43 * This function does the following:
44 * - Disable MMU in H/W.
45 * - Free the pgt_infos pool.
47 * All contexts should be freed before calling this function.
49 void hl_mmu_fini(struct hl_device *hdev)
52 hdev->mmu_func.fini(hdev);
56 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
57 * @ctx: pointer to the context structure to initialize.
59 * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
60 * page tables hops related to this context.
61 * Return: 0 on success, non-zero otherwise.
63 int hl_mmu_ctx_init(struct hl_ctx *ctx)
65 struct hl_device *hdev = ctx->hdev;
68 return hdev->mmu_func.ctx_init(ctx);
74 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
76 * @ctx: pointer to the context structure
78 * This function does the following:
79 * - Free any pgts which were not freed yet
81 * - Free DRAM default page mapping hops
83 void hl_mmu_ctx_fini(struct hl_ctx *ctx)
85 struct hl_device *hdev = ctx->hdev;
88 hdev->mmu_func.ctx_fini(ctx);
92 * hl_mmu_unmap - unmaps a virtual addr
94 * @ctx: pointer to the context structure
95 * @virt_addr: virt addr to map from
96 * @page_size: size of the page to unmap
97 * @flush_pte: whether to do a PCI flush
99 * This function does the following:
100 * - Check that the virt addr is mapped
101 * - Unmap the virt addr and frees pgts if possible
102 * - Returns 0 on success, -EINVAL if the given addr is not mapped
104 * Because this function changes the page tables in the device and because it
105 * changes the MMU hash, it must be protected by a lock.
106 * However, because it maps only a single page, the lock should be implemented
107 * in a higher level in order to protect the entire mapping of the memory area
109 * For optimization reasons PCI flush may be requested once after unmapping of
112 int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
115 struct hl_device *hdev = ctx->hdev;
116 struct asic_fixed_properties *prop = &hdev->asic_prop;
117 struct hl_mmu_properties *mmu_prop;
119 u32 real_page_size, npages;
123 if (!hdev->mmu_enable)
126 is_dram_addr = is_dram_va(hdev, virt_addr);
129 mmu_prop = &prop->dmmu;
130 else if ((page_size % prop->pmmu_huge.page_size) == 0)
131 mmu_prop = &prop->pmmu_huge;
133 mmu_prop = &prop->pmmu;
136 * The H/W handles mapping of specific page sizes. Hence if the page
137 * size is bigger, we break it to sub-pages and unmap them separately.
139 if ((page_size % mmu_prop->page_size) == 0) {
140 real_page_size = mmu_prop->page_size;
143 "page size of %u is not %uKB aligned, can't unmap\n",
144 page_size, mmu_prop->page_size >> 10);
149 npages = page_size / real_page_size;
150 real_virt_addr = virt_addr;
152 for (i = 0 ; i < npages ; i++) {
153 rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr);
157 real_virt_addr += real_page_size;
161 hdev->mmu_func.flush(ctx);
167 * hl_mmu_map - maps a virtual addr to physical addr
169 * @ctx: pointer to the context structure
170 * @virt_addr: virt addr to map from
171 * @phys_addr: phys addr to map to
172 * @page_size: physical page size
173 * @flush_pte: whether to do a PCI flush
175 * This function does the following:
176 * - Check that the virt addr is not mapped
177 * - Allocate pgts as necessary in order to map the virt addr to the phys
178 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
180 * Because this function changes the page tables in the device and because it
181 * changes the MMU hash, it must be protected by a lock.
182 * However, because it maps only a single page, the lock should be implemented
183 * in a higher level in order to protect the entire mapping of the memory area
185 * For optimization reasons PCI flush may be requested once after mapping of
188 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
191 struct hl_device *hdev = ctx->hdev;
192 struct asic_fixed_properties *prop = &hdev->asic_prop;
193 struct hl_mmu_properties *mmu_prop;
194 u64 real_virt_addr, real_phys_addr;
195 u32 real_page_size, npages;
196 int i, rc, mapped_cnt = 0;
199 if (!hdev->mmu_enable)
202 is_dram_addr = is_dram_va(hdev, virt_addr);
205 mmu_prop = &prop->dmmu;
206 else if ((page_size % prop->pmmu_huge.page_size) == 0)
207 mmu_prop = &prop->pmmu_huge;
209 mmu_prop = &prop->pmmu;
212 * The H/W handles mapping of specific page sizes. Hence if the page
213 * size is bigger, we break it to sub-pages and map them separately.
215 if ((page_size % mmu_prop->page_size) == 0) {
216 real_page_size = mmu_prop->page_size;
219 "page size of %u is not %uKB aligned, can't unmap\n",
220 page_size, mmu_prop->page_size >> 10);
225 WARN_ONCE((phys_addr & (real_page_size - 1)),
226 "Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
227 phys_addr, real_page_size);
229 npages = page_size / real_page_size;
230 real_virt_addr = virt_addr;
231 real_phys_addr = phys_addr;
233 for (i = 0 ; i < npages ; i++) {
234 rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr,
235 real_page_size, is_dram_addr);
239 real_virt_addr += real_page_size;
240 real_phys_addr += real_page_size;
245 hdev->mmu_func.flush(ctx);
250 real_virt_addr = virt_addr;
251 for (i = 0 ; i < mapped_cnt ; i++) {
252 if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr))
253 dev_warn_ratelimited(hdev->dev,
254 "failed to unmap va: 0x%llx\n", real_virt_addr);
256 real_virt_addr += real_page_size;
259 hdev->mmu_func.flush(ctx);
265 * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out
267 * @ctx: pointer to the context structure
270 void hl_mmu_swap_out(struct hl_ctx *ctx)
272 struct hl_device *hdev = ctx->hdev;
274 if (hdev->mmu_enable)
275 hdev->mmu_func.swap_out(ctx);
279 * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in
281 * @ctx: pointer to the context structure
284 void hl_mmu_swap_in(struct hl_ctx *ctx)
286 struct hl_device *hdev = ctx->hdev;
288 if (hdev->mmu_enable)
289 hdev->mmu_func.swap_in(ctx);
292 int hl_mmu_if_set_funcs(struct hl_device *hdev)
294 if (!hdev->mmu_enable)
297 switch (hdev->asic_type) {
300 hl_mmu_v1_set_funcs(hdev);
303 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",