/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
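
/*
 * Power the GPU up: enable the VDD regulator and both clocks, remove the
 * clamps on the 3D power partition, then release the reset line. Each step
 * is unwound in reverse order if a later one fails.
 */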
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        ret = regulator_enable(tdev->vdd);
        if (ret)
                goto err_power;

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        reset_control_assert(tdev->rst);
        udelay(10);

        ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
        if (ret)
                goto err_clamp;
        udelay(10);

        reset_control_deassert(tdev->rst);
        udelay(10);

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        clk_disable_unprepare(tdev->clk);
err_clk:
        regulator_disable(tdev->vdd);
err_power:
        return ret;
}
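
/*
 * Power-down mirrors power-up in reverse: assert reset, gate the clocks,
 * then cut the regulator.
 */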
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        reset_control_assert(tdev->rst);
        udelay(10);

        clk_disable_unprepare(tdev->clk_pwr);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        return regulator_disable(tdev->vdd);
}
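
/*
 * If an IOMMU is available on the platform bus, attach to it and set up an
 * nvkm_mm allocator covering the 40-bit GPU virtual address range. Failure
 * is not fatal: the domain is simply left NULL and the device is used
 * without IOMMU translation.
 */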
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                /* iommu_domain_alloc() returns NULL on failure, not an
                 * ERR_PTR, so a NULL check is the correct test here. */
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * or equal to the system's PAGE_SIZE, with a preference if
                 * both are equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0,
                                   (1ULL << 40) >> tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
        iommu_domain_free(tdev->iommu.domain);
error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
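
/* Tear down the state set up by nvkm_device_tegra_probe_iommu(). */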
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}
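
/* Resolve a generic nvkm_device back to its Tegra-specific container. */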
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}
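
/*
 * Interrupt handler: disarm the master controller (MC), dispatch pending
 * interrupts, then rearm. Returning IRQ_NONE when nothing was handled lets
 * the core detect spurious interrupts on this shared line.
 */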
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_mc *mc = tdev->device.mc;
        bool handled = false;

        /* mc may still be NULL if an interrupt fires before the subdev is
         * constructed; guarding against that here is a defensive addition. */
        if (likely(mc)) {
                nvkm_mc_intr_unarm(mc);
                nvkm_mc_intr(mc, &handled);
                nvkm_mc_intr_rearm(mc);
        }

        return handled ? IRQ_HANDLED : IRQ_NONE;
}
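
/*
 * The "stall" IRQ is requested in init and released in fini, so it is never
 * left live across suspend or teardown.
 */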
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}
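
/*
 * Destructor: inverse of nvkm_device_tegra_new(). Returning the container
 * pointer lets the common nvkm device code free the whole allocation.
 */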
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}
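
/*
 * Hooks through which the device-independent nvkm core reaches this Tegra
 * platform implementation. cpu_coherent is false because the GPU on these
 * SoCs does not snoop the CPU caches, so nvkm must flush explicitly.
 */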
static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};
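
/*
 * Probe entry point: acquire the regulator, reset line and clocks, set up
 * the IOMMU, power the GPU up, then hand off to the common nvkm device
 * constructor.
 */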
int
nvkm_device_tegra_new(struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;
        *pdevice = &tdev->device;
        tdev->pdev = pdev;

        tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(tdev->vdd))
                return PTR_ERR(tdev->vdd);

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst))
                return PTR_ERR(tdev->rst);

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk))
                return PTR_ERR(tdev->clk);

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr))
                return PTR_ERR(tdev->clk_pwr);

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                return ret;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                                NVKM_DEVICE_TEGRA, pdev->id, NULL,
                                cfg, dbg, detect, mmio, subdev_mask,
                                &tdev->device);
}
#else
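/*
 * Stub for kernels built without CONFIG_NOUVEAU_PLATFORM_DRIVER: platform
 * (Tegra) devices are simply reported as unsupported.
 */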
int
nvkm_device_tegra_new(struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif