/*
 * Copyright 2015 Karol Herbst <nouveau@karolherbst.de>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <nouveau@karolherbst.de>
 */
27 gk104_pcie_version_supported(struct nvkm_pci *pci)
29 return (nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x4) == 0x4 ? 2 : 1;
33 gk104_pcie_set_cap_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
35 struct nvkm_device *device = pci->subdev.device;
38 case NVKM_PCIE_SPEED_2_5:
39 gf100_pcie_set_cap_speed(pci, false);
40 nvkm_mask(device, 0x8c1c0, 0x30000, 0x10000);
42 case NVKM_PCIE_SPEED_5_0:
43 gf100_pcie_set_cap_speed(pci, true);
44 nvkm_mask(device, 0x8c1c0, 0x30000, 0x20000);
46 case NVKM_PCIE_SPEED_8_0:
47 gf100_pcie_set_cap_speed(pci, true);
48 nvkm_mask(device, 0x8c1c0, 0x30000, 0x30000);
53 static enum nvkm_pcie_speed
54 gk104_pcie_cap_speed(struct nvkm_pci *pci)
56 int speed = gf100_pcie_cap_speed(pci);
59 return NVKM_PCIE_SPEED_2_5;
62 int speed2 = nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x30000;
66 return NVKM_PCIE_SPEED_2_5;
68 return NVKM_PCIE_SPEED_5_0;
70 return NVKM_PCIE_SPEED_8_0;
78 gk104_pcie_set_lnkctl_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
82 case NVKM_PCIE_SPEED_2_5:
85 case NVKM_PCIE_SPEED_5_0:
88 case NVKM_PCIE_SPEED_8_0:
92 nvkm_pci_mask(pci, 0xa8, 0x3, reg_v);
95 static enum nvkm_pcie_speed
96 gk104_pcie_lnkctl_speed(struct nvkm_pci *pci)
98 u8 reg_v = nvkm_pci_rd32(pci, 0xa8) & 0x3;
102 return NVKM_PCIE_SPEED_2_5;
104 return NVKM_PCIE_SPEED_5_0;
106 return NVKM_PCIE_SPEED_8_0;
111 static enum nvkm_pcie_speed
112 gk104_pcie_max_speed(struct nvkm_pci *pci)
114 u32 max_speed = nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x300000;
117 return NVKM_PCIE_SPEED_8_0;
119 return NVKM_PCIE_SPEED_5_0;
121 return NVKM_PCIE_SPEED_2_5;
123 return NVKM_PCIE_SPEED_2_5;
127 gk104_pcie_set_link_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
129 struct nvkm_device *device = pci->subdev.device;
133 case NVKM_PCIE_SPEED_8_0:
134 mask_value = 0x00000;
136 case NVKM_PCIE_SPEED_5_0:
137 mask_value = 0x40000;
139 case NVKM_PCIE_SPEED_2_5:
141 mask_value = 0x80000;
145 nvkm_mask(device, 0x8c040, 0xc0000, mask_value);
146 nvkm_mask(device, 0x8c040, 0x1, 0x1);
150 gk104_pcie_init(struct nvkm_pci * pci)
152 enum nvkm_pcie_speed lnkctl_speed, max_speed, cap_speed;
153 struct nvkm_subdev *subdev = &pci->subdev;
155 if (gf100_pcie_version(pci) < 2)
158 lnkctl_speed = gk104_pcie_lnkctl_speed(pci);
159 max_speed = gk104_pcie_max_speed(pci);
160 cap_speed = gk104_pcie_cap_speed(pci);
162 if (cap_speed != max_speed) {
163 nvkm_trace(subdev, "adjusting cap to max speed\n");
164 gk104_pcie_set_cap_speed(pci, max_speed);
165 cap_speed = gk104_pcie_cap_speed(pci);
166 if (cap_speed != max_speed)
167 nvkm_warn(subdev, "failed to adjust cap speed\n");
170 if (lnkctl_speed != max_speed) {
171 nvkm_debug(subdev, "adjusting lnkctl to max speed\n");
172 gk104_pcie_set_lnkctl_speed(pci, max_speed);
173 lnkctl_speed = gk104_pcie_lnkctl_speed(pci);
174 if (lnkctl_speed != max_speed)
175 nvkm_error(subdev, "failed to adjust lnkctl speed\n");
182 gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
184 struct nvkm_subdev *subdev = &pci->subdev;
185 enum nvkm_pcie_speed lnk_ctl_speed = gk104_pcie_lnkctl_speed(pci);
186 enum nvkm_pcie_speed lnk_cap_speed = gk104_pcie_cap_speed(pci);
188 if (speed > lnk_cap_speed) {
189 speed = lnk_cap_speed;
190 nvkm_warn(subdev, "dropping requested speed due too low cap"
194 if (speed > lnk_ctl_speed) {
195 speed = lnk_ctl_speed;
196 nvkm_warn(subdev, "dropping requested speed due too low"
200 gk104_pcie_set_link_speed(pci, speed);
205 static const struct nvkm_pci_func
207 .init = g84_pci_init,
208 .rd32 = nv40_pci_rd32,
209 .wr08 = nv40_pci_wr08,
210 .wr32 = nv40_pci_wr32,
211 .msi_rearm = nv40_pci_msi_rearm,
213 .pcie.init = gk104_pcie_init,
214 .pcie.set_link = gk104_pcie_set_link,
216 .pcie.max_speed = gk104_pcie_max_speed,
217 .pcie.cur_speed = g84_pcie_cur_speed,
219 .pcie.set_version = gf100_pcie_set_version,
220 .pcie.version = gf100_pcie_version,
221 .pcie.version_supported = gk104_pcie_version_supported,
225 gk104_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
227 return nvkm_pci_new_(&gk104_pci_func, device, index, ppci);