/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
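/*
 * Platform (Tegra) glue for NVKM. This file powers the GPU rail and
 * clocks up and down, optionally places the GPU behind an IOMMU domain,
 * and exposes the platform device's MMIO resources and "stall" interrupt
 * to the nvkm_device constructed at the bottom of the file.
 */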
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		goto err_power;

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	/* The "pwr" clock runs at a fixed 204 MHz. */
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	/* Cycle the GPU through reset while removing the rail clamps. */
	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
err_power:
	return ret;
}
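/*
 * Teardown mirrors nvkm_device_tegra_power_up() in reverse: hold the GPU
 * in reset, gate its clocks, then cut the rail. Only regulator_disable()
 * can fail here, so its status is the return value.
 */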
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	return regulator_disable(tdev->vdd);
}
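/*
 * If the SoC has an IOMMU that can serve the GPU, take over its
 * translation: detach from any mapping the ARM DMA API set up, allocate
 * a dedicated domain, derive a page shift no larger than the CPU's
 * PAGE_SHIFT, and carve the GPU-addressable window into an nvkm_mm
 * allocator. On any failure the device simply runs without an IOMMU.
 */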
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		/* iommu_domain_alloc() returns NULL on failure, not an ERR_PTR. */
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * if both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			/* fls() is 1-based, so subtract one to get a shift. */
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}
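/*
 * Top half of the interrupt handler: disarm the master controller, let
 * it dispatch to whichever engines raised the interrupt, then rearm.
 * The IRQ is requested with IRQF_SHARED, so IRQ_NONE must be returned
 * when nothing was actually pending for us.
 */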
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
	return 0;
}
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
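/*
 * Probe sequence: look up the regulator, reset line and clocks, widen
 * the DMA mask to the GPU-addressable range, probe the IOMMU, power the
 * GPU up, and only then construct the nvkm_device. Failures unwind in
 * reverse order through the labels at the bottom.
 */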
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(tdev->vdd)) {
		ret = PTR_ERR(tdev->vdd);
		goto free;
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	/* Only some chips need an external reference clock. */
	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable
	 * space. This will be refined in nouveau_ttm_init(), but we need to
	 * do it early for instmem to behave properly.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif
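/*
 * Usage sketch (illustrative only, not part of the original file): a
 * platform driver's probe routine hands its platform_device and a
 * per-chip function table to nvkm_device_tegra_new(). The iommu_bit
 * value below assumes a 34-bit GPU-addressable window; the real caller
 * lives in nouveau_platform.c, and example_probe() is a made-up name.
 */
#if 0
static const struct nvkm_device_tegra_func gk20a_platform_data = {
	.iommu_bit = 34,
};

static int
example_probe(struct platform_device *pdev)
{
	struct nvkm_device *device;
	int ret;

	ret = nvkm_device_tegra_new(&gk20a_platform_data, pdev, NULL, NULL,
				    true, true, ~0ULL, &device);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, device);
	return 0;
}
#endif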